'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
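

# Minimal usage sketch for the config above (illustrative; assumes this module is
# importable so that FalconConfig resolves):
#
#     config = FalconConfig(num_kv_heads=1)
#     config.head_dim  # 4544 // 71 == 64 with the defaults above
#     config.rotary    # True, because alibi defaults to False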
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class AcceleratedScheduler:
    """
    A wrapper around a learning rate scheduler that only steps when the optimizer(s) have gradient updates,
    to avoid stepping the scheduler during skipped steps (e.g. overflow in mixed precision training).
    """

    def __init__(self, scheduler, optimizers, step_with_optimizer: bool = True, split_batches: bool = False):
        self.scheduler = scheduler
        self.optimizers = optimizers if isinstance(optimizers, (list, tuple)) else [optimizers]
        self.split_batches = split_batches
        self.step_with_optimizer = step_with_optimizer
        self.gradient_state = GradientState()

    def step(self, *args, **kwargs):
        if not self.step_with_optimizer:
            # No link between scheduler and optimizer -> just step
            self.scheduler.step(*args, **kwargs)
            return

        # Otherwise, first make sure the optimizer was stepped.
        if not self.gradient_state.sync_gradients:
            if self.gradient_state.adjust_scheduler:
                self.scheduler._step_count += 1
            return

        for opt in self.optimizers:
            if opt.step_was_skipped:
                return
        if self.split_batches:
            # Split batches -> the training dataloader batch size is not changed so one step per training step
            self.scheduler.step(*args, **kwargs)
        else:
            # Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
            # num_processes steps per training step
            num_processes = AcceleratorState().num_processes
            for _ in range(num_processes):
                # Special case when using OneCycle and `drop_last` was not used
                if hasattr(self.scheduler, "total_steps"):
                    if self.scheduler._step_count <= self.scheduler.total_steps:
                        self.scheduler.step(*args, **kwargs)
                else:
                    self.scheduler.step(*args, **kwargs)

    def get_last_lr(self):
        return self.scheduler.get_last_lr()

    def state_dict(self):
        return self.scheduler.state_dict()

    def load_state_dict(self, state_dict):
        self.scheduler.load_state_dict(state_dict)

    def get_lr(self):
        return self.scheduler.get_lr()

    def print_lr(self, *args, **kwargs):
        return self.scheduler.print_lr(*args, **kwargs)
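

# Minimal usage sketch for AcceleratedScheduler (illustrative; in practice
# `Accelerator.prepare` constructs this wrapper for you):
#
#     import torch
#     model = torch.nn.Linear(2, 2)
#     optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
#     scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10)
#     wrapped = AcceleratedScheduler(scheduler, optimizer, step_with_optimizer=True)
#     wrapped.step()  # steps num_processes times per call unless split_batches=True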
"""simple docstring"""
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
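

# Worked example for the helpers above (values computed by hand; assumes numpy
# and scikit-learn are installed):
#
#     import numpy as np
#     preds = np.array([1, 0, 1, 1])
#     labels = np.array([1, 0, 0, 1])
#     glue_compute_metrics("sst-2", preds, labels)  # {"acc": 0.75}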
"""simple docstring"""
import os
import re
import unicodedata
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import is_torch_available, logging
if is_torch_available():
import torch
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""AI-Sweden/gpt-sw3-126m""": """https://huggingface.co/AI-Sweden/gpt-sw3-126m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-350m""": """https://huggingface.co/AI-Sweden/gpt-sw3-350m/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-1.6b""": """https://huggingface.co/AI-Sweden/gpt-sw3-1.6b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-6.7b""": """https://huggingface.co/AI-Sweden/gpt-sw3-6.7b/resolve/main/spiece.model""",
"""AI-Sweden/gpt-sw3-20b""": """https://huggingface.co/AI-Sweden/gpt-sw3-20b/resolve/main/spiece.model""",
}
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""AI-Sweden/gpt-sw3-126m""": 2048,
"""AI-Sweden/gpt-sw3-350m""": 2048,
"""AI-Sweden/gpt-sw3-1.6b""": 2048,
"""AI-Sweden/gpt-sw3-6.7b""": 2048,
"""AI-Sweden/gpt-sw3-20b""": 2048,
}
class GPTSw3Tokenizer(PreTrainedTokenizer):
    """Construct a GPT-SW3 tokenizer backed by SentencePiece."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=False,
        keep_accents=False,
        pad_token=None,
        unk_token=None,
        eos_token=None,
        bos_token=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        name_or_path = kwargs.get("name_or_path")
        if name_or_path is None:
            logger.warning(
                "name_or_path not provided, will work for all GPTSw3 models except gpt-sw3-7b,"
                " you are testing the model, this can safely be ignored"
            )
            name_or_path = "None"

        # Default definitions for our 2 tokenizer versions, with None-checks to enable proper testing
        eos_token = "<|endoftext|>" if eos_token is None else eos_token
        unk_token = "<unk>" if unk_token is None else unk_token
        if "gpt-sw3-7b" in name_or_path:
            pad_token = unk_token if pad_token is None else pad_token
            bos_token = eos_token if bos_token is None else bos_token
        else:
            pad_token = "<pad>" if pad_token is None else pad_token
            bos_token = "<s>" if bos_token is None else bos_token

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        # Used for whitespace normalization in input texts
        # fmt: off
        self.whitespaces = {" ", " ", " ", " ", " ", " ", " ", " ", " ", " ", "", ""}
        # fmt: on

        # Regular expression to remove non-printing characters (e.g. some unicode control chars) in preprocessing
        self.non_printing_characters_re = re.compile(
            f"[{''.join(map(chr, list(range(0, 9)) + list(range(11, 32)) + list(range(127, 160)) + [160, 173, 8203]))}]"
        )

    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    @property
    # Copied from transformers.models.albert.tokenization_albert.AlbertTokenizer.vocab_size
    def vocab_size(self) -> int:
        return len(self.sp_model)

    def preprocess_text(self, text: str) -> str:
        """Removes non-printing characters, normalizes whitespaces and applies NFC unicode normalization."""
        text = self.non_printing_characters_re.sub("", text)

        # Normalize whitespaces
        text = "".join([char if char not in self.whitespaces else " " for char in text])

        # NFC Unicode normalization
        text = unicodedata.normalize("NFC", text)
        return text

    def _tokenize(self, text: str, **kwargs) -> List[str]:
        text = self.preprocess_text(text)
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id (int) using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (int) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    @staticmethod
    def clean_up_tokenization(out_string: str) -> str:
        """Returns the input string; overridden to disable the default clean-up."""
        return out_string

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        """Converts a sequence of tokens (strings) into a single string, keeping special tokens intact."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                # TODO: Check if this is needed, as it ensures that decode(encode(doc)) != doc by adding extra whitespace in the decoded document
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)

    def encode_fast(
        self, text: Union[str, List[str]], return_tensors: Union[str, bool] = False
    ) -> Union[List[int], List[List[int]], "torch.Tensor"]:
        """
        Encodes a text or batch of texts to token ids using preprocessing and the raw SP tokenizer.
        Does NOT handle special tokens correctly; these can manually be added as ids afterwards.
        """
        if isinstance(text, str):
            text = self.preprocess_text(text)
            token_ids = self.sp_model.encode(text)
        else:
            text = [self.preprocess_text(t) for t in text]
            token_ids = self.sp_model.encode(text)

        if return_tensors is True or return_tensors == "pt":
            token_ids = torch.tensor(token_ids)

        return token_ids

    def decode_fast(self, token_ids: Union[int, List[int]]) -> str:
        """Decodes token ids to text using the raw SP tokenizer."""
        return self.sp_model.decode(token_ids)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        all_responses = [f"User: {text}" if is_user else f"Bot: {text}" for is_user, text in conversation.iter_texts()]
        prompt = (
            f"{self.eos_token}{self.bos_token}" + f"{self.bos_token}".join(all_responses) + f"{self.bos_token}Bot:"
        )
        return self.encode(text=prompt)
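

# Usage sketch for the fast encode/decode path above (illustrative; it requires
# downloading a real SentencePiece model, so it is not self-contained):
#
#     tokenizer = GPTSw3Tokenizer.from_pretrained("AI-Sweden/gpt-sw3-126m")
#     ids = tokenizer.encode_fast("Träd är fina")  # plain list of ids, no special tokens
#     text = tokenizer.decode_fast(ids)            # round-trips through sp_model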
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_deit": ["DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "DeiTConfig", "DeiTOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_deit"] = ["DeiTFeatureExtractor"]
    _import_structure["image_processing_deit"] = ["DeiTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_deit"] = [
'DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'DeiTForImageClassification',
'DeiTForImageClassificationWithTeacher',
'DeiTForMaskedImageModeling',
'DeiTModel',
'DeiTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_deit"] = [
'TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFDeiTForImageClassification',
'TFDeiTForImageClassificationWithTeacher',
'TFDeiTForMaskedImageModeling',
'TFDeiTModel',
'TFDeiTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_deit import DEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, DeiTConfig, DeiTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_deit import DeiTFeatureExtractor
from .image_processing_deit import DeiTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deit import (
DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
DeiTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deit import (
TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
TFDeiTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
    "configuration_vivit": ["VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "VivitConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["image_processing_vivit"] = ["VivitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vivit"] = [
"VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"VivitModel",
"VivitPreTrainedModel",
"VivitForVideoClassification",
]
if TYPE_CHECKING:
from .configuration_vivit import VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, VivitConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_vivit import VivitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vivit import (
VIVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
VivitForVideoClassification,
VivitModel,
VivitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
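

# Minimal sketch of aligning the template with a dataset's features (assumes a
# `datasets.Features` object whose "labels" column is a ClassLabel):
#
#     features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
#     task = ImageClassification().align_with_features(features)
#     task.label_schema["labels"].names  # ["cat", "dog"]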
'''simple docstring'''
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
'vocab_file': 'vocab.txt',
'merges_file': 'bpe.codes',
}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt',
},
'merges_file': {
'vinai/phobert-base': 'https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes',
'vinai/phobert-large': 'https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "vinai/phobert-base": 256,
    "vinai/phobert-large": 256,
}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
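

# Worked example for get_pairs (computed by hand):
#     get_pairs(("l", "o", "w</w>"))  # {("l", "o"), ("o", "w</w>")}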
class PhobertTokenizer(PreTrainedTokenizer):
    """Construct a PhoBERT tokenizer, using Byte-Pair Encoding."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        **kwargs,
    ):
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.vocab_file = vocab_file
        self.merges_file = merges_file

        self.encoder = {}
        self.encoder[self.bos_token] = 0
        self.encoder[self.pad_token] = 1
        self.encoder[self.eos_token] = 2
        self.encoder[self.unk_token] = 3

        self.add_from_file(vocab_file)

        self.decoder = {v: k for k, v in self.encoder.items()}

        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[:-1]
        merges = [tuple(merge.split()[:-1]) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id (int) using the vocab."""
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        """Converts an index (int) to a token (str) using the vocab."""
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) into a single string."""
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        out_merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        if os.path.abspath(self.merges_file) != os.path.abspath(out_merge_file):
            copyfile(self.merges_file, out_merge_file)

        return out_vocab_file, out_merge_file

    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception(f"Incorrect encoding detected in {f}, please rebuild the dataset")
            return

        lines = f.readlines()
        for lineTmp in lines:
            line = lineTmp.strip()
            idx = line.rfind(" ")
            if idx == -1:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt>'")
            word = line[:idx]
            self.encoder[word] = len(self.encoder)
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True,
            only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images

        generator = torch.Generator(device=device).manual_seed(0)
        image_from_tuple = sd_pipe(
            [prompt], image=low_res_image, generator=generator, guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np", return_dict=False,
        )[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
        expected_slice = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
    def test_stable_diffusion_upscale_batch(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        output = sd_pipe(
            2 * [prompt], image=2 * [low_res_image], guidance_scale=6.0, noise_level=20,
            num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2

        generator = torch.Generator(device=device).manual_seed(0)
        output = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_images_per_prompt=2,
            guidance_scale=6.0, noise_level=20, num_inference_steps=2, output_type="np",
        )
        image = output.images
        assert image.shape[0] == 2
    @unittest.skipIf(torch_device != "cuda", "This test requires a GPU")
    def test_stable_diffusion_upscale_fp16(self):
        unet = self.dummy_cond_unet_upscale
        low_res_scheduler = DDPMScheduler()
        scheduler = DDIMScheduler(prediction_type="v_prediction")
        vae = self.dummy_vae
        text_encoder = self.dummy_text_encoder
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        image = self.dummy_image.cpu().permute(0, 2, 3, 1)[0]
        low_res_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        # put models in fp16, except vae as it overflows in fp16
        unet = unet.half()
        text_encoder = text_encoder.half()

        # make sure here that pndm scheduler skips prk
        sd_pipe = StableDiffusionUpscalePipeline(
            unet=unet, low_res_scheduler=low_res_scheduler, scheduler=scheduler, vae=vae,
            text_encoder=text_encoder, tokenizer=tokenizer, max_noise_level=350,
        )
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        prompt = "A painting of a squirrel eating a burger"
        generator = torch.manual_seed(0)
        image = sd_pipe(
            [prompt], image=low_res_image, generator=generator, num_inference_steps=2, output_type="np",
        ).images

        expected_height_width = low_res_image.size[0] * 4
        assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_stable_diffusion_upscale_pipeline(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 1e-3

    def test_stable_diffusion_upscale_pipeline_fp16(self):
        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
            "/upsampled_cat_fp16.npy"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(prompt=prompt, image=image, generator=generator, output_type="np")
        image = output.images[0]

        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1

    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()

        image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-upscale/low_res_cat.png"
        )

        model_id = "stabilityai/stable-diffusion-x4-upscaler"
        pipe = StableDiffusionUpscalePipeline.from_pretrained(model_id, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()

        prompt = "a cat sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(prompt=prompt, image=image, generator=generator, num_inference_steps=5, output_type="np")

        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.9 GB is allocated
        assert mem_bytes < 2.9 * 10**9
'''simple docstring'''
from __future__ import annotations
RADIX = 10


def radix_sort(list_of_ints: list[int]) -> list[int]:
    """Sort a list of non-negative integers with least-significant-digit radix sort."""
    placement = 1
    max_digit = max(list_of_ints)
    while placement <= max_digit:
        # declare and initialize empty buckets
        buckets: list[list] = [[] for _ in range(RADIX)]

        # split list_of_ints between the buckets
        for i in list_of_ints:
            tmp = int((i / placement) % RADIX)
            buckets[tmp].append(i)

        # put each bucket's contents into list_of_ints
        a = 0
        for b in range(RADIX):
            for i in buckets[b]:
                list_of_ints[a] = i
                a += 1

        # move to the next digit
        placement *= RADIX
    return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod()
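

# Worked example (traced by hand): successive passes over the ones, tens and
# hundreds digits progressively order the list.
#
#     radix_sort([44, 12, 3, 100, 5])  # -> [3, 5, 12, 44, 100]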
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]
if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print("the result is:", re)
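
# Worked example (traced by hand): for the input "1,2,3" the running maxima are
# sum_value = [1, 3, 6] and rear = [1, 3, 6], so solve_sub_array() returns 6.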
import argparse
import json
import os
import tensorstore as ts
import torch
from flax import serialization
from flax.traverse_util import flatten_dict, unflatten_dict
from tensorflow.io import gfile
from transformers.modeling_utils import dtype_byte_size
from transformers.models.switch_transformers.convert_switch_transformers_original_flax_checkpoint_to_pytorch import (
rename_keys,
)
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
from transformers.utils.hub import convert_file_size_to_int
def rename_base_flax_keys(flax_key_tuple, flax_tensor):
    """Rename a flax checkpoint key/tensor pair to PyTorch conventions."""
    if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 3:
        # expert layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = torch.permute(flax_tensor, (0, 2, 1))
    elif flax_key_tuple[-1] == "kernel" and ".".join(flax_key_tuple):
        # linear layer
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
        flax_tensor = flax_tensor.T
    elif flax_key_tuple[-1] in ["scale", "embedding"]:
        flax_key_tuple = flax_key_tuple[:-1] + ("weight",)
    return flax_key_tuple, flax_tensor
def get_key_and_tensorstore_dict(layer, checkpoint_info, switch_checkpoint_path):
    if "metadata" in layer:
        split_layer = layer.split("metadata")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("metadata" + split_layer[1]).split("/"))]
    elif "kvstore" in layer:
        split_layer = layer.split("kvstore")
        curr_real_layer_name = "".join(split_layer[0])[:-1]
        split_layer = [tuple(("kvstore" + split_layer[1]).split("/"))]
    else:
        split_layer = layer.split("/")
        curr_real_layer_name = "/".join(split_layer[:-1])
        split_layer = (split_layer[-1],)

    if "kvstore/path" in layer:
        content = f"{switch_checkpoint_path}/{checkpoint_info[layer]}"
    elif "kvstore/driver" in layer:
        content = "file"
    else:
        content = checkpoint_info[layer]
    return curr_real_layer_name, split_layer, content
def rename_and_save_block(current_block, save_path):
    current_block = rename_keys(current_block)
    new_current_block = {}
    for k, v in current_block.items():
        new_current_block[k.replace("/", ".")] = v
    current_block = new_current_block
    torch.save(current_block, save_path)
def shard_on_the_fly(switch_checkpoint_path, dump_path, max_shard_size, dtype, weights_name: str = WEIGHTS_NAME):
    max_shard_size = convert_file_size_to_int(max_shard_size)

    sharded_state_dicts = []
    current_block = {}
    current_block_size = 0
    total_size = 0

    os.makedirs(dump_path, exist_ok=True)
    with gfile.GFile(switch_checkpoint_path + "/checkpoint", "rb") as fp:
        checkpoint_info = serialization.msgpack_restore(fp.read())["optimizer"]["target"]
        checkpoint_info = flatten_dict(checkpoint_info, sep="/")

    all_layers = {}
    for layer in checkpoint_info.keys():
        curr_real_layer_name, split_layer, content = get_key_and_tensorstore_dict(
            layer, checkpoint_info, switch_checkpoint_path
        )
        if curr_real_layer_name in all_layers:
            all_layers[curr_real_layer_name][split_layer[-1]] = content
        else:
            all_layers[curr_real_layer_name] = {split_layer[-1]: content}

    for key in all_layers.keys():
        # open tensorstore file
        raw_weights = ts.open(unflatten_dict(all_layers[key])).result().read().result()
        raw_weights = torch.tensor(raw_weights)
        weight_size = raw_weights.numel() * dtype_byte_size(raw_weights.dtype)

        # use the renaming pattern from the small conversion scripts
        key, raw_weights = rename_base_flax_keys(tuple(key.split("/")), raw_weights)
        key = "/".join(key)

        # If this weight is going to tip up over the maximal size, we split.
        if current_block_size + weight_size > max_shard_size:
            save_path = os.path.join(
                dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin")
            )
            rename_and_save_block(current_block, save_path)
            sharded_state_dicts.append(current_block.keys())
            del current_block
            current_block = {}
            current_block_size = 0

        current_block[key] = raw_weights.to(getattr(torch, dtype))
        current_block_size += weight_size
        total_size += weight_size

    # Add the last block
    save_path = os.path.join(dump_path, weights_name.replace(".bin", f"-{len(sharded_state_dicts)+1:05d}-of-???.bin"))
    rename_and_save_block(current_block, save_path)
    sharded_state_dicts.append(current_block.keys())

    # If we only have one shard, we return it
    if len(sharded_state_dicts) == 1:
        return {weights_name: sharded_state_dicts[0]}, None

    # Otherwise, let's build the index
    weight_map = {}
    shards = {}
    for idx, shard in enumerate(sharded_state_dicts):
        shard_file = weights_name.replace(".bin", f"-{idx+1:05d}-of-{len(sharded_state_dicts):05d}.bin")
        temp_filename = os.path.join(dump_path, weights_name.replace(".bin", f"-{idx+1:05d}-of-???.bin"))
        os.rename(temp_filename, os.path.join(dump_path, shard_file))
        shards[shard_file] = shard
        for key in shard:
            weight_map[key] = shard_file

    # Add the metadata
    metadata = {"total_size": total_size}
    index = {"metadata": metadata, "weight_map": weight_map}

    with open(os.path.join(dump_path, WEIGHTS_INDEX_NAME), "w", encoding="utf-8") as f:
        content = json.dumps(index, indent=2, sort_keys=True) + "\n"
        f.write(content)

    return metadata, index
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--switch_t5x_checkpoint_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128/checkpoint_634600",
        type=str,
        required=False,
        help="Path to a directory containing a folder per layer. Follows the original Google format.",
    )
    parser.add_argument("--max_shard_size", default="10GB", required=False, help="Max shard size")
    parser.add_argument("--dtype", default="bfloat16", type=str, required=False, help="dtype of the saved model")
    parser.add_argument(
        "--pytorch_dump_folder_path",
        default="/mnt/disks/disk_switch/original_checkpoints/switch-xxl-128-converted",
        type=str,
        required=False,
        help="Path to the output pytorch model.",
    )
    args = parser.parse_args()
    shard_on_the_fly(
        args.switch_t5x_checkpoint_path,
        args.pytorch_dump_folder_path,
        args.max_shard_size,
        args.dtype,
    )
def sanity_check():
    from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration, T5Tokenizer

    config = SwitchTransformersConfig.from_pretrained("google/switch-base-8")
    config.save_pretrained("/home/arthur_huggingface_co/transformers/switch_converted")
    model = SwitchTransformersForConditionalGeneration.from_pretrained(
        "/home/arthur_huggingface_co/transformers/switch_converted", device_map="auto"
    )

    tokenizer = T5Tokenizer.from_pretrained("t5-small")
    text = "A <extra_id_0> walks into a bar a orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>."

    input_ids = tokenizer(text, return_tensors="pt").input_ids
    out = model.generate(input_ids, decoder_start_token_id=0)
    print(tokenizer.decode(out[0]))
def heaps(arr: list) -> list:
    """Return all permutations of `arr` using the iterative version of Heap's algorithm."""
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(n: int, arr: list):
        c = [0] * n
        res.append(tuple(arr))

        i = 0
        while i < n:
            if c[i] < i:
                if i % 2 == 0:
                    arr[0], arr[i] = arr[i], arr[0]
                else:
                    arr[c[i]], arr[i] = arr[i], arr[c[i]]
                res.append(tuple(arr))
                c[i] += 1
                i = 0
            else:
                c[i] = 0
                i += 1

    generate(len(arr), arr)
    return res
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
print(heaps(arr))
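
# Worked example (traced by hand): heaps([1, 2, 3]) yields all 3! = 6
# permutations, starting from the input order:
#     [(1, 2, 3), (2, 1, 3), (3, 1, 2), (1, 3, 2), (2, 3, 1), (3, 2, 1)]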
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    model_type = "bert-generation"

    def __init__(
        self, vocab_size=50358, hidden_size=1024, num_hidden_layers=24, num_attention_heads=16,
        intermediate_size=4096, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0,
        bos_token_id=2, eos_token_id=1, position_embedding_type="absolute", use_cache=True, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
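

# Minimal usage sketch (illustrative; assumes the class is exposed as
# `transformers.BertGenerationConfig`):
#
#     config = BertGenerationConfig(hidden_size=512, num_hidden_layers=6)
#     config.vocab_size  # 50358, the default above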
from string import ascii_uppercase
ALPHABET_VALUES = {str(ord(c) - 55): c for c in ascii_uppercase}


def decimal_to_any(num: int, base: int) -> str:
    """Convert a positive integer to its string representation in any base from 2 to 36."""
    if isinstance(num, float):
        raise TypeError("int() can't convert non-string with explicit base")
    if num < 0:
        raise ValueError("parameter must be positive int")
    if isinstance(base, str):
        raise TypeError("'str' object cannot be interpreted as an integer")
    if isinstance(base, float):
        raise TypeError("'float' object cannot be interpreted as an integer")
    if base in (0, 1):
        raise ValueError("base must be >= 2")
    if base > 36:
        raise ValueError("base must be <= 36")

    new_value = ""
    mod = 0
    div = 0
    while div != 1:
        div, mod = divmod(num, base)
        if base >= 11 and 9 < mod < 36:
            actual_value = ALPHABET_VALUES[str(mod)]
        else:
            actual_value = str(mod)
        new_value += actual_value
        div = num // base
        num = div
        if div == 0:
            return str(new_value[::-1])
        elif div == 1:
            new_value += str(div)
            return str(new_value[::-1])

    return new_value[::-1]
if __name__ == "__main__":
import doctest
doctest.testmod()
    for base in range(2, 37):
        for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
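
# Worked example (traced by hand): decimal_to_any(255, 16) == "FF", since
# divmod(255, 16) leaves remainder 15 twice and ALPHABET_VALUES["15"] is "F".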
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
logger = logging.get_logger(__name__)

STOPPING_CRITERIA_INPUTS_DOCSTRING = r"\n    Args:\n        input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):\n            Indices of input sequence tokens in the vocabulary.\n\n            Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and\n            [`PreTrainedTokenizer.__call__`] for details.\n\n            [What are input IDs?](../glossary#input-ids)\n        scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):\n            Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax\n            or scores for each vocabulary token after SoftMax.\n        kwargs (`Dict[str, Any]`, *optional*):\n            Additional stopping criteria specific kwargs.\n\n    Return:\n        `bool`. `False` indicates we should continue, `True` indicates we should stop.\n\n"
class StoppingCriteria(ABC):
    """Abstract base class for all stopping criteria that can be applied during generation."""

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        raise NotImplementedError("StoppingCriteria needs to be subclassed")


class MaxLengthCriteria(StoppingCriteria):
    def __init__(self, max_length: int, max_position_embeddings: Optional[int] = None):
        self.max_length = max_length
        self.max_position_embeddings = max_position_embeddings

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        cur_len = input_ids.shape[-1]
        is_done = cur_len >= self.max_length
        if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
            logger.warning_once(
                "This is a friendly reminder - the current text generation call will exceed the model's predefined "
                f"maximum length ({self.max_position_embeddings}). Depending on the model, you may observe "
                "exceptions, performance degradation, or nothing at all."
            )
        return is_done


class MaxNewTokensCriteria(StoppingCriteria):
    def __init__(self, start_length: int, max_new_tokens: int):
        warnings.warn(
            "The class `MaxNewTokensCriteria` is deprecated. "
            f"Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` "
            "with `max_length = start_length + max_new_tokens` instead.",
            FutureWarning,
        )
        self.start_length = start_length
        self.max_new_tokens = max_new_tokens
        self.max_length = start_length + max_new_tokens

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return input_ids.shape[-1] >= self.max_length


class MaxTimeCriteria(StoppingCriteria):
    def __init__(self, max_time: float, initial_timestamp: Optional[float] = None):
        self.max_time = max_time
        self.initial_timestamp = time.time() if initial_timestamp is None else initial_timestamp

    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return time.time() - self.initial_timestamp > self.max_time


class StoppingCriteriaList(list):
    @add_start_docstrings(STOPPING_CRITERIA_INPUTS_DOCSTRING)
    def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool:
        return any(criteria(input_ids, scores) for criteria in self)

    @property
    def max_length(self) -> Optional[int]:
        for stopping_criterium in self:
            if isinstance(stopping_criterium, MaxLengthCriteria):
                return stopping_criterium.max_length
            elif isinstance(stopping_criterium, MaxNewTokensCriteria):
                return stopping_criterium.max_length
        return None


def validate_stopping_criteria(stopping_criteria: StoppingCriteriaList, max_length: int) -> StoppingCriteriaList:
    stopping_max_length = stopping_criteria.max_length
    new_stopping_criteria = deepcopy(stopping_criteria)
    if stopping_max_length is not None and stopping_max_length != max_length:
        warnings.warn("You set different `max_length` for stopping criteria and `max_length` parameter", UserWarning)
    elif stopping_max_length is None:
        new_stopping_criteria.append(MaxLengthCriteria(max_length=max_length))
    return new_stopping_criteria
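

# Minimal usage sketch: combining criteria in a StoppingCriteriaList (illustrative;
# a generation loop would call the list with (input_ids, scores) each step):
#
#     criteria = StoppingCriteriaList([
#         MaxLengthCriteria(max_length=64),
#         MaxTimeCriteria(max_time=5.0),
#     ])
#     criteria.max_length  # 64, surfaced by the property above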
'''simple docstring'''
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
_TO_SKIP = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class __A (unittest.TestCase ):
model_mapping = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
tf_model_mapping = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
model_mapping = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
tf_model_mapping = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def get_test_pipeline( self , model , tokenizer , processor ):
classifier = ZeroShotClassificationPipeline(
model=model , tokenizer=tokenizer , candidate_labels=["politics", "health"] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def run_pipeline_test( self , classifier , examples ):
outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics" )
self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
# No kwarg
outputs = classifier("Who are you voting for in 2020?" , ["politics"] )
self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics"] )
self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
outputs = classifier("Who are you voting for in 2020?" , candidate_labels="politics, public health" )
self.assertEqual(
outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
outputs = classifier("Who are you voting for in 2020?" , candidate_labels=["politics", "public health"] )
self.assertEqual(
outputs , {"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["scores"] ) ) , 1.0 )
outputs = classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="This text is about {}" )
self.assertEqual(outputs , {"sequence": ANY(str ), "labels": [ANY(str )], "scores": [ANY(float )]} )
# https://github.com/huggingface/transformers/issues/13846
outputs = classifier(["I am happy"] , ["positive", "negative"] )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
for i in range(1 )
] , )
outputs = classifier(["I am happy", "I am sad"] , ["positive", "negative"] )
self.assertEqual(
outputs , [
{"sequence": ANY(str ), "labels": [ANY(str ), ANY(str )], "scores": [ANY(float ), ANY(float )]}
for i in range(2 )
] , )
with self.assertRaises(ValueError ):
classifier("" , candidate_labels="politics" )
with self.assertRaises(TypeError ):
classifier(None , candidate_labels="politics" )
with self.assertRaises(ValueError ):
classifier("Who are you voting for in 2020?" , candidate_labels="" )
with self.assertRaises(TypeError ):
classifier("Who are you voting for in 2020?" , candidate_labels=None )
with self.assertRaises(ValueError ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template="Not formatting template" , )
with self.assertRaises(AttributeError ):
classifier(
"Who are you voting for in 2020?" , candidate_labels="politics" , hypothesis_template=None , )
self.run_entailment_id(classifier )
def run_entailment_id( self , zero_shot_classifier ):
config = zero_shot_classifier.model.config
original_label2id = config.label2id
original_entailment = zero_shot_classifier.entailment_id
config.label2id = {"LABEL_0": 0, "LABEL_1": 1, "LABEL_2": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
config.label2id = {"entailment": 0, "neutral": 1, "contradiction": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
config.label2id = {"ENTAIL": 0, "NON-ENTAIL": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
config.label2id = {"ENTAIL": 2, "NEUTRAL": 1, "CONTR": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
config.label2id = original_label2id
self.assertEqual(original_entailment , zero_shot_classifier.entailment_id )
@require_torch
def test_truncation( self ):
zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"Who are you voting for in 2020?" * 1_00 , candidate_labels=["politics", "public health", "science"] )
@require_torch
def test_small_model_pt( self ):
zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="pt" , )
outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def test_small_model_tf( self ):
zero_shot_classifier = pipeline(
"zero-shot-classification" , model="sshleifer/tiny-distilbert-base-cased-distilled-squad" , framework="tf" , )
outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["science", "public health", "politics"],
"scores": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def test_large_model_pt( self ):
zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="pt" )
outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase_ , )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def test_large_model_tf( self ):
zero_shot_classifier = pipeline("zero-shot-classification" , model="roberta-large-mnli" , framework="tf" )
outputs = zero_shot_classifier(
"Who are you voting for in 2020?" , candidate_labels=["politics", "public health", "science"] )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": "Who are you voting for in 2020?",
"labels": ["politics", "public health", "science"],
"scores": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
outputs = zero_shot_classifier(
"The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"
" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"
" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"
" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"
" machine translation tasks show these models to be superior in quality while being more parallelizable"
" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"
" English-to-German translation task, improving over the existing best results, including ensembles by"
" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"
" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"
" fraction of the training costs of the best models from the literature. We show that the Transformer"
" generalizes well to other tasks by applying it successfully to English constituency parsing both with"
" large and limited training data." , candidate_labels=["machine learning", "statistics", "translation", "vision"] , multi_label=UpperCamelCase_ , )
self.assertEqual(
nested_simplify(outputs ) , {
"sequence": (
"The dominant sequence transduction models are based on complex recurrent or convolutional neural"
" networks in an encoder-decoder configuration. The best performing models also connect the"
" encoder and decoder through an attention mechanism. We propose a new simple network"
" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"
" and convolutions entirely. Experiments on two machine translation tasks show these models to be"
" superior in quality while being more parallelizable and requiring significantly less time to"
" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"
" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"
" English-to-French translation task, our model establishes a new single-model state-of-the-art"
" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"
" costs of the best models from the literature. We show that the Transformer generalizes well to"
" other tasks by applying it successfully to English constituency parsing both with large and"
" limited training data."
),
"labels": ["translation", "machine learning", "vision", "statistics"],
"scores": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
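# Editor's sketch of what these tests exercise (model and labels here are
# illustrative, not fixtures from this file):
#   from transformers import pipeline
#   clf = pipeline("zero-shot-classification", model="roberta-large-mnli")
#   clf("Who are you voting for in 2020?", candidate_labels=["politics", "science"])
#   # -> {"sequence": ..., "labels": [...], "scores": [...]}; the scores sum
#   #    to 1 unless multi_label=True, which scores each label independently.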
| 168 | '''simple docstring'''
from __future__ import annotations
def solve_maze( maze ) -> bool:
"""simple docstring"""
size = len(maze )
# We need to create solution object to save path.
solutions = [[0 for _ in range(size )] for _ in range(size )]
solved = run_maze(maze , 0 , 0 , solutions )
if solved:
print("\n".join(str(row ) for row in solutions ) )
else:
print("No solution exists!" )
return solved
def run_maze( maze , i , j , solutions ) -> bool:
"""simple docstring"""
size = len(maze )
# Final check point.
if i == j == (size - 1):
solutions[i][j] = 1
return True
lower_flag = (not i < 0) and (not j < 0) # Check lower bounds
upper_flag = (i < size) and (j < size) # Check upper bounds
if lower_flag and upper_flag:
# check for already visited and block points.
block_flag = (not solutions[i][j]) and (not maze[i][j])
if block_flag:
# check visited
solutions[i][j] = 1
# check for directions
if (
run_maze(maze , i + 1 , j , solutions )
or run_maze(maze , i , j + 1 , solutions )
or run_maze(maze , i - 1 , j , solutions )
or run_maze(maze , i , j - 1 , solutions )
):
return True
solutions[i][j] = 0
return False
return False
if __name__ == "__main__":
import doctest
doctest.testmod()
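# Editor's example (not in the original): 0 marks an open cell and 1 a wall;
# the solver backtracks from the top-left to the bottom-right corner.
#   demo_maze = [
#       [0, 1, 0, 0],
#       [0, 0, 0, 1],
#       [1, 0, 1, 0],
#       [1, 0, 0, 0],
#   ]
#   solve_maze(demo_maze)  # prints the 0/1 solution matrix row by row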
| 168 | 1 |
__all__ = [
"""Audio""",
"""Array2D""",
"""Array3D""",
"""Array4D""",
"""Array5D""",
"""ClassLabel""",
"""Features""",
"""Sequence""",
"""Value""",
"""Image""",
"""Translation""",
"""TranslationVariableLanguages""",
]
from .audio import Audio
from .features import Array2D, Array3D, Array4D, Array5D, ClassLabel, Features, Sequence, Value
from .image import Image
from .translation import Translation, TranslationVariableLanguages
| 714 | '''simple docstring'''
from __future__ import annotations
def merge( input_list , low , mid , high ) ->list:
result = []
left, right = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
input_list[low : high + 1] = result + left + right
return input_list
def iter_merge_sort( input_list ) ->list:
if len(input_list ) <= 1:
return input_list
input_list = list(input_list )
# iteration for two-way merging
p = 2
while p <= len(input_list ):
# getting low, high and middle value for merge-sort of single list
for i in range(0 , len(input_list ) , p ):
low = i
high = i + p - 1
mid = (low + high + 1) // 2
input_list = merge(input_list , low , mid , high )
# final merge of last two parts
if p * 2 >= len(input_list ):
mid = i
input_list = merge(input_list , 0 , mid , len(input_list ) - 1 )
break
p *= 2
return input_list
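# Editor's note: this is bottom-up (iterative) merge sort. For example,
# iter_merge_sort([5, 1, 4, 2]) first merges the width-2 runs into [1, 5]
# and [2, 4], then merges those into [1, 2, 4, 5].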
if __name__ == "__main__":
user_input = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
unsorted = []
else:
unsorted = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted))
| 603 | 0 |
"""simple docstring"""
def jaccard_similarity( set_a , set_b , alternative_union=False ):
if isinstance(set_a , set ) and isinstance(set_b , set ):
intersection = len(set_a.intersection(set_b ) )
if alternative_union:
union = len(set_a ) + len(set_b )
else:
union = len(set_a.union(set_b ) )
return intersection / union
if isinstance(set_a , (list, tuple) ) and isinstance(set_b , (list, tuple) ):
intersection = [element for element in set_a if element in set_b]
if alternative_union:
union = len(set_a ) + len(set_b )
return len(intersection ) / union
else:
union = set_a + [element for element in set_b if element not in set_a]
return len(intersection ) / len(union )
return None
if __name__ == "__main__":
lowerCamelCase__ : str = {"a", "b", "c", "d", "e"}
lowerCamelCase__ : List[str] = {"c", "d", "e", "f", "h", "i"}
print(jaccard_similarity(set_a, set_b))
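# Editor's worked example for the values above: the intersection is
# {"c", "d", "e"} (size 3) and the union has 8 elements, so
# jaccard_similarity(set_a, set_b) == 3 / 8 == 0.375.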
| 238 | '''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping( key , file ):
'''simple docstring'''
layer_rename_map = {
'''word_embeddings.weight''': '''word_embeddings.weight''',
'''word_embeddings.norm.weight''': '''word_embeddings_layernorm.weight''',
'''word_embeddings.norm.bias''': '''word_embeddings_layernorm.bias''',
'''weight''': '''ln_f.weight''',
'''bias''': '''ln_f.bias''',
}
if key in layer_rename_map:
return layer_rename_map[key]
# Handle transformer blocks
layer_number = int(re.match(R'''.*layer_(\d*).*''' , file )[1] )
layer_number -= 3
return F"""h.{layer_number}.""" + key
def get_dtype_size( dtype ):
'''simple docstring'''
if dtype == torch.bool:
return 1 / 8
bit_search = re.search(R'''[^\d](\d+)$''' , str(dtype ) )
if bit_search is None:
raise ValueError(F"""`dtype` is not a valid dtype: {dtype}.""" )
bit_size = int(bit_search.groups()[0] )
return bit_size // 8
def convert_bloom_checkpoint_to_pytorch( bloom_checkpoint_path , bloom_config_file , pytorch_dump_folder_path , shard_model , pretraining_tp ):
'''simple docstring'''
if bloom_config_file == "":
config = BloomConfig()
else:
config = BloomConfig.from_json_file(bloom_config_file )
if shard_model:
file_names = os.listdir(bloom_checkpoint_path )
file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
index_dict = {'''weight_map''': {}, '''metadata''': {}}
total_size = 0
missing_keys = None
config = BloomConfig()
for j, file in enumerate(file_names ):
print('''Processing file: {}'''.format(file ) )
tensors = None
for i in range(pretraining_tp ):
# load all TP files
f_name = file.replace('''model_00''' , F"""model_0{i}""" )
temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='''cpu''' )
# Rename keys in the transformers names
keys = list(temp.keys() )
for key in keys:
temp[layer_name_mapping(key , file )] = temp.pop(key )
if tensors is None:
tensors = temp
else:
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] = tensors[key] / pretraining_tp
torch.save(
tensors , os.path.join(
pytorch_dump_folder_path , '''pytorch_model_{}-of-{}.bin'''.format(str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
value = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
index_dict["weight_map"][key] = '''pytorch_model_{}-of-{}.bin'''.format(
str(j + 1 ).zfill(5 ) , str(len(file_names ) ).zfill(5 ) )
config = BloomConfig()
pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
index_dict['''metadata''']['''total_size'''] = total_size
with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
with open(os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME + '''.index.json''' ) , '''w''' , encoding='''utf-8''' ) as f:
json_config = json.dumps(index_dict , indent=2 , sort_keys=True ) + '''\n'''
f.write(json_config )
else:
model = BloomModel(config )
file_names = os.listdir(bloom_checkpoint_path )
file_names = sorted(filter(lambda s : s.startswith('''layer''' ) and "model_00" in s , file_names ) )
missing_keys = None
for i, file in enumerate(file_names ):
tensors = None
for i in range(pretraining_tp ):
# load all TP files
f_name = file.replace('''model_00''' , F"""model_0{i}""" )
temp = torch.load(os.path.join(bloom_checkpoint_path , f_name ) , map_location='''cpu''' )
# Rename keys in the transformers names
keys = list(temp.keys() )
for key in keys:
temp[layer_name_mapping(key , file )] = temp.pop(key )
if tensors is None:
tensors = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
cat_dim = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
tensors[key] = torch.cat([tensors[key], temp[key]] , dim=cat_dim )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(end ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] = tensors[key] / pretraining_tp
other_keys = model.load_state_dict(tensors , strict=False )
assert not other_keys.unexpected_keys, F"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
missing_keys = set(other_keys.missing_keys )
else:
missing_keys = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, F"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(pytorch_dump_folder_path , exist_ok=True )
pytorch_weights_dump_path = pytorch_dump_folder_path + '''/''' + WEIGHTS_NAME
pytorch_config_dump_path = pytorch_dump_folder_path + '''/''' + CONFIG_NAME
print(F"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
model = model.to(config.torch_dtype )
torch.save(model.state_dict() , pytorch_weights_dump_path )
print(F"""Save configuration file to {pytorch_config_dump_path}""" )
with open(pytorch_config_dump_path , '''w''' , encoding='''utf-8''' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
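# Editor's note: a typical invocation (script name and paths are assumptions,
# not taken from this file):
#   python convert_bloom_original_checkpoint_to_pytorch.py \
#       --bloom_checkpoint_path /path/to/megatron_ckpt \
#       --bloom_config_file config.json \
#       --pytorch_dump_folder_path ./bloom-hf --shard_model --pretraining_tp 4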
| 390 | 0 |
'''simple docstring'''
import re
def is_sri_lankan_phone_number( phone ) -> bool:
"""simple docstring"""
pattern = re.compile(
R"""^(?:0|94|\+94|0{2}94)""" R"""7(0|1|2|4|5|6|7|8)""" R"""(-| |)""" R"""\d{7}$""" )
return bool(re.search(pattern , phone ) )
if __name__ == "__main__":
lowerCamelCase__ = "0094702343221"
print(is_sri_lankan_phone_number(phone))
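# Editor's note on the pattern: it accepts the prefixes 0 / 94 / +94 / 0094,
# then "7" plus a second digit from {0,1,2,4,5,6,7,8}, an optional "-" or " "
# separator, and exactly seven more digits. For example:
#   is_sri_lankan_phone_number("+94773283048")  # True
#   is_sri_lankan_phone_number("0731234567")    # False ("73" is not a valid code)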
| 715 |
import argparse
import os
import re
import tensorflow as tf
import torch
from transformers import BertConfig, BertModel
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def load_tf2_weights_in_bert( model , tf_checkpoint_path , config ) -> Any:
"""simple docstring"""
tf_path = os.path.abspath(tf_checkpoint_path )
logger.info(F'Converting TensorFlow checkpoint from {tf_path}' )
# Load weights from TF model
init_vars = tf.train.list_variables(tf_path )
names = []
arrays = []
layer_depth = []
for full_name, shape in init_vars:
# logger.info(f"Loading TF weight {name} with shape {shape}")
name = full_name.split("""/""" )
if full_name == "_CHECKPOINTABLE_OBJECT_GRAPH" or name[0] in ["global_step", "save_counter"]:
logger.info(F'Skipping non-model layer {full_name}' )
continue
if "optimizer" in full_name:
logger.info(F'Skipping optimization layer {full_name}' )
continue
if name[0] == "model":
# ignore initial 'model'
_UpperCamelCase = name[1:]
# figure out how many levels deep the name is
_UpperCamelCase = 0
for _name in name:
if _name.startswith("""layer_with_weights""" ):
depth += 1
else:
break
layer_depth.append(lowerCAmelCase )
# read data
_UpperCamelCase = tf.train.load_variable(lowerCAmelCase , lowerCAmelCase )
names.append("""/""".join(lowerCAmelCase ) )
arrays.append(lowerCAmelCase )
logger.info(F'Read a total of {len(lowerCAmelCase ):,} layers' )
# Sanity check
if len(set(lowerCAmelCase ) ) != 1:
raise ValueError(F'Found layer names with different depths (layer depth {list(set(lowerCAmelCase ) )})' )
_UpperCamelCase = list(set(lowerCAmelCase ) )[0]
if layer_depth != 1:
raise ValueError(
"""The model contains more than just the embedding/encoder layers. This script does not handle MLM/NSP"""
""" heads.""" )
# convert layers
logger.info("""Converting weights...""" )
for full_name, array in zip(names , arrays ):
name = full_name.split("""/""" )
pointer = model
trace = []
for i, m_name in enumerate(name ):
if m_name == ".ATTRIBUTES":
# variable names end with .ATTRIBUTES/VARIABLE_VALUE
break
if m_name.startswith("""layer_with_weights""" ):
_UpperCamelCase = int(m_name.split("""-""" )[-1] )
if layer_num <= 2:
# embedding layers
# layer_num 0: word_embeddings
# layer_num 1: position_embeddings
# layer_num 2: token_type_embeddings
continue
elif layer_num == 3:
# embedding LayerNorm
trace.extend(["""embeddings""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif layer_num > 3 and layer_num < config.num_hidden_layers + 4:
# encoder layers
trace.extend(["""encoder""", """layer""", str(layer_num - 4 )] )
_UpperCamelCase = getattr(lowerCAmelCase , """encoder""" )
_UpperCamelCase = getattr(lowerCAmelCase , """layer""" )
_UpperCamelCase = pointer[layer_num - 4]
elif layer_num == config.num_hidden_layers + 4:
# pooler layer
trace.extend(["""pooler""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """pooler""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "embeddings":
trace.append("""embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """embeddings""" )
if layer_num == 0:
trace.append("""word_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """word_embeddings""" )
elif layer_num == 1:
trace.append("""position_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """position_embeddings""" )
elif layer_num == 2:
trace.append("""token_type_embeddings""" )
_UpperCamelCase = getattr(lowerCAmelCase , """token_type_embeddings""" )
else:
raise ValueError(F'Unknown embedding layer with name {full_name}' )
trace.append("""weight""" )
_UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
elif m_name == "_attention_layer":
# self-attention layer
trace.extend(["""attention""", """self"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """self""" )
elif m_name == "_attention_layer_norm":
# output attention norm
trace.extend(["""attention""", """output""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif m_name == "_attention_output_dense":
# output attention dense
trace.extend(["""attention""", """output""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """attention""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_dense":
# output dense
trace.extend(["""output""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_layer_norm":
# output dense
trace.extend(["""output""", """LayerNorm"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """LayerNorm""" )
elif m_name == "_key_dense":
# attention key
trace.append("""key""" )
_UpperCamelCase = getattr(lowerCAmelCase , """key""" )
elif m_name == "_query_dense":
# attention query
trace.append("""query""" )
_UpperCamelCase = getattr(lowerCAmelCase , """query""" )
elif m_name == "_value_dense":
# attention value
trace.append("""value""" )
_UpperCamelCase = getattr(lowerCAmelCase , """value""" )
elif m_name == "_intermediate_dense":
# attention intermediate dense
trace.extend(["""intermediate""", """dense"""] )
_UpperCamelCase = getattr(lowerCAmelCase , """intermediate""" )
_UpperCamelCase = getattr(lowerCAmelCase , """dense""" )
elif m_name == "_output_layer_norm":
# output layer norm
trace.append("""output""" )
_UpperCamelCase = getattr(lowerCAmelCase , """output""" )
# weights & biases
elif m_name in ["bias", "beta"]:
trace.append("""bias""" )
_UpperCamelCase = getattr(lowerCAmelCase , """bias""" )
elif m_name in ["kernel", "gamma"]:
trace.append("""weight""" )
_UpperCamelCase = getattr(lowerCAmelCase , """weight""" )
else:
logger.warning(F'Ignored {m_name}' )
# for certain layers reshape is necessary
_UpperCamelCase = """.""".join(lowerCAmelCase )
if re.match(R"""(\S+)\.attention\.self\.(key|value|query)\.(bias|weight)""" , lowerCAmelCase ) or re.match(
R"""(\S+)\.attention\.output\.dense\.weight""" , lowerCAmelCase ):
_UpperCamelCase = array.reshape(pointer.data.shape )
if "kernel" in full_name:
_UpperCamelCase = array.transpose()
if pointer.shape == array.shape:
_UpperCamelCase = torch.from_numpy(lowerCAmelCase )
else:
raise ValueError(
F'Shape mismatch in layer {full_name}: Model expects shape {pointer.shape} but layer contains shape:'
F' {array.shape}' )
logger.info(F'Successfully set variable {full_name} to PyTorch layer {trace}' )
return model
def convert_tf2_checkpoint_to_pytorch( tf_checkpoint_path , config_path , pytorch_dump_path ) -> Tuple:
"""simple docstring"""
logger.info(F'Loading model based on config from {config_path}...' )
config = BertConfig.from_json_file(config_path )
model = BertModel(config )
# Load weights from checkpoint
logger.info(F'Loading weights from checkpoint {tf_checkpoint_path}...' )
load_tf2_weights_in_bert(model , tf_checkpoint_path , config )
# Save pytorch-model
logger.info(F'Saving PyTorch model to {pytorch_dump_path}...' )
torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--tf_checkpoint_path", type=str, required=True, help="Path to the TensorFlow 2.x checkpoint path."
)
parser.add_argument(
"--bert_config_file",
type=str,
required=True,
help="The config json file corresponding to the BERT model. This specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path",
type=str,
required=True,
help="Path to the output PyTorch model (must include filename).",
)
args = parser.parse_args()
convert_tf2_checkpoint_to_pytorch(args.tf_checkpoint_path, args.bert_config_file, args.pytorch_dump_path)
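# Editor's note: a typical invocation (file names are placeholders):
#   python convert_bert_original_tf2_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt-100000 \
#       --bert_config_file bert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin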
| 202 | 0 |
'''simple docstring'''
from math import factorial
def binomial_distribution( successes , trials , prob ) -> float:
'''simple docstring'''
if successes > trials:
raise ValueError("successes must be lower or equal to trials" )
if trials < 0 or successes < 0:
raise ValueError("the function is defined for non-negative integers" )
if not isinstance(successes , int ) or not isinstance(trials , int ):
raise ValueError("the function is defined for non-negative integers" )
if not 0 < prob < 1:
raise ValueError("prob has to be in range of 1 - 0" )
probability = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
coefficient = float(factorial(trials ) )
coefficient /= factorial(successes ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print('Probability of 2 successes out of 4 trails')
print('with probability of 0.75 is:', end=' ')
print(binomial_distribution(2, 4, 0.75))
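# Editor's worked example for the call above: C(4, 2) = 4!/(2! * 2!) = 6 and
# 0.75**2 * 0.25**2 = 0.03515625, so binomial_distribution(2, 4, 0.75)
# returns 6 * 0.03515625 = 0.2109375.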
| 685 |
'''simple docstring'''
from argparse import ArgumentParser
from .env import EnvironmentCommand
def main() -> None:
parser = ArgumentParser("""Diffusers CLI tool""" , usage="""diffusers-cli <command> [<args>]""" )
commands_parser = parser.add_subparsers(help="""diffusers-cli command helpers""" )
# Register commands
EnvironmentCommand.register_subcommand(commands_parser )
# Let's go
args = parser.parse_args()
if not hasattr(args , """func""" ):
parser.print_help()
exit(1 )
# Run
service = args.func(args )
service.run()
if __name__ == "__main__":
main()
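# Editor's note: with `diffusers` installed this module backs the console
# script `diffusers-cli`; e.g. `diffusers-cli env` dispatches to
# EnvironmentCommand and prints platform and library version info.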
| 689 | 0 |
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append(".")
def get_module_path( test_file ):
components = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
"`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got "
F"{test_file} instead." )
test_fn = components[-1]
if not test_fn.endswith("py" ):
raise ValueError(F"`test_file` should be a python file. Got {test_fn} instead." )
if not test_fn.startswith("test_modeling_" ):
raise ValueError(
F"`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead." )
components = components[:-1] + [test_fn.replace(".py" , "" )]
test_module_path = ".".join(components )
return test_module_path
def get_test_module( test_file ):
test_module_path = get_module_path(test_file )
test_module = importlib.import_module(test_module_path )
return test_module
def get_tester_classes( test_file ):
tester_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
if attr.endswith("ModelTester" ):
tester_classes.append(getattr(test_module , attr ) )
# sort with class names
return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_classes( test_file ):
test_classes = []
test_module = get_test_module(test_file )
for attr in dir(test_module ):
attr_value = getattr(test_module , attr )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
model_classes = getattr(attr_value , "all_model_classes" , [] )
if len(model_classes ) > 0:
test_classes.append(attr_value )
# sort with class names
return sorted(test_classes , key=lambda x : x.__name__ )
def get_model_classes( test_file ):
test_classes = get_test_classes(test_file )
model_classes = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
return sorted(model_classes , key=lambda x : x.__name__ )
def get_model_tester_from_test_class( test_class ):
test = test_class()
if hasattr(test , "setUp" ):
test.setUp()
model_tester = None
if hasattr(test , "model_tester" ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
model_tester = test.model_tester.__class__
return model_tester
def get_test_classes_for_model( test_file , model_class ):
test_classes = get_test_classes(test_file )
target_test_classes = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(test_class )
# sort with class names
return sorted(target_test_classes , key=lambda x : x.__name__ )
def get_tester_classes_for_model( test_file , model_class ):
test_classes = get_test_classes_for_model(test_file , model_class )
tester_classes = []
for test_class in test_classes:
tester_class = get_model_tester_from_test_class(test_class )
if tester_class is not None:
tester_classes.append(tester_class )
# sort with class names
return sorted(tester_classes , key=lambda x : x.__name__ )
def get_test_to_tester_mapping( test_file ):
test_classes = get_test_classes(test_file )
test_tester_mapping = {test_class: get_model_tester_from_test_class(test_class ) for test_class in test_classes}
return test_tester_mapping
def get_model_to_test_mapping( test_file ):
model_classes = get_model_classes(test_file )
model_test_mapping = {
model_class: get_test_classes_for_model(test_file , model_class ) for model_class in model_classes
}
return model_test_mapping
def get_model_to_tester_mapping( test_file ):
model_classes = get_model_classes(test_file )
model_to_tester_mapping = {
model_class: get_tester_classes_for_model(test_file , model_class ) for model_class in model_classes
}
return model_to_tester_mapping
def to_json( o ):
if isinstance(o , str ):
return o
elif isinstance(o , type ):
return o.__name__
elif isinstance(o , (list, tuple) ):
return [to_json(x ) for x in o]
elif isinstance(o , dict ):
return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
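# Editor's usage sketch (hypothetical test file path, run from the repo root):
#   test_file = "tests/models/bert/test_modeling_bert.py"
#   print(get_test_to_tester_mapping(test_file))
#   print(get_model_to_test_mapping(test_file))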
| 453 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_swinv2": ["SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP", "Swinv2Config"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_swinv2"] = [
"SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST",
"Swinv2ForImageClassification",
"Swinv2ForMaskedImageModeling",
"Swinv2Model",
"Swinv2PreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 453 | 1 |
"""simple docstring"""
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
global_rng = random.Random()
def floats_list( shape , scale=1.0 , rng=None , name=None ):
if rng is None:
rng = global_rng
values = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class TvltFeatureExtractionTester(unittest.TestCase ):
def __init__( self , parent , batch_size=7 , min_seq_length=4_0_0 , max_seq_length=2_0_0_0 , spectrogram_length=2_0_4_8 , feature_size=1_2_8 , num_audio_channels=1 , hop_length=5_1_2 , chunk_length=3_0 , sampling_rate=4_4_1_0_0 , ):
"""simple docstring"""
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.spectrogram_length = spectrogram_length
self.feature_size = feature_size
self.num_audio_channels = num_audio_channels
self.hop_length = hop_length
self.chunk_length = chunk_length
self.sampling_rate = sampling_rate
def prepare_feat_extract_dict( self ):
"""simple docstring"""
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def prepare_inputs_for_common( self , equal_length=False , numpify=False ):
"""simple docstring"""
def _flatten(__lowerCamelCase : List[str] ):
return list(itertools.chain(*__lowerCamelCase ) )
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
speech_inputs = [np.asarray(x ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class TvltFeatureExtractionTest(SequenceFeatureExtractionTestMixin , unittest.TestCase ):
feature_extraction_class = TvltFeatureExtractor
def setUp( self ):
"""simple docstring"""
self.feat_extract_tester = TvltFeatureExtractionTester(self )
def test_feat_extract_properties( self ):
"""simple docstring"""
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(feature_extractor , '''spectrogram_length''' ) )
self.assertTrue(hasattr(feature_extractor , '''feature_size''' ) )
self.assertTrue(hasattr(feature_extractor , '''num_audio_channels''' ) )
self.assertTrue(hasattr(feature_extractor , '''hop_length''' ) )
self.assertTrue(hasattr(feature_extractor , '''chunk_length''' ) )
self.assertTrue(hasattr(feature_extractor , '''sampling_rate''' ) )
def test_feat_extract_from_and_save_pretrained( self ):
"""simple docstring"""
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
saved_file = feat_extract_first.save_pretrained(tmpdirname )[0]
check_json_file_has_correct_format(saved_file )
feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = dict_first.pop('''mel_filters''' )
mel_2 = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(mel_1 , mel_2 ) )
self.assertEqual(dict_first , dict_second )
def test_feat_extract_to_json_file( self ):
"""simple docstring"""
feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
json_file_path = os.path.join(tmpdirname , '''feat_extract.json''' )
feat_extract_first.to_json_file(json_file_path )
feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path )
dict_first = feat_extract_first.to_dict()
dict_second = feat_extract_second.to_dict()
mel_1 = dict_first.pop('''mel_filters''' )
mel_2 = dict_second.pop('''mel_filters''' )
self.assertTrue(np.allclose(mel_1 , mel_2 ) )
self.assertEqual(dict_first , dict_second )
def test_call( self ):
"""simple docstring"""
# Initialize feature_extractor
feature_extractor = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
speech_inputs = [floats_list((1, x) )[0] for x in range(8_0_0 , 1_4_0_0 , 2_0_0 )]
np_speech_inputs = [np.asarray(speech_input ) for speech_input in speech_inputs]
# Test not batched input
encoded_audios = feature_extractor(np_speech_inputs[0] , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
encoded_audios = feature_extractor(
np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 , mask_audio=True ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
speech_inputs = [floats_list((1, x) )[0] for x in (8_0_0, 8_0_0, 8_0_0)]
np_speech_inputs = np.asarray(speech_inputs )
encoded_audios = feature_extractor(np_speech_inputs , return_tensors='''np''' , sampling_rate=4_4_1_0_0 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def _load_datasamples( self , num_samples ):
"""simple docstring"""
ds = load_dataset('''hf-internal-testing/librispeech_asr_dummy''' , '''clean''' , split='''validation''' )
# automatic decoding with librispeech
speech_samples = ds.sort('''id''' ).select(range(num_samples ) )[:num_samples]['''audio''']
return [x["array"] for x in speech_samples]
def test_integration( self ):
"""simple docstring"""
input_speech = self._load_datasamples(1 )
feature_extractor = TvltFeatureExtractor()
audio_values = feature_extractor(input_speech , return_tensors='''pt''' ).audio_values
self.assertEquals(audio_values.shape , (1, 1, 1_9_2, 1_2_8) )
expected_slice = torch.tensor([[-0.3_0_3_2, -0.2_7_0_8], [-0.4_4_3_4, -0.4_0_0_7]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] , expected_slice , atol=1E-4 ) )
| 103 |
class Things :
'''simple docstring'''
def __init__( self , name , value , weight ):
'''simple docstring'''
self.name = name
self.value = value
self.weight = weight
def __repr__( self ):
'''simple docstring'''
return f'''{self.__class__.__name__}({self.name}, {self.value}, {self.weight})'''
def get_value( self ):
'''simple docstring'''
return self.value
def get_name( self ):
'''simple docstring'''
return self.name
def get_weight( self ):
'''simple docstring'''
return self.weight
def value_weight( self ):
'''simple docstring'''
return self.value / self.weight
def build_menu( name , value , weight ) -> Any:
menu = []
for i in range(len(value ) ):
menu.append(Things(name[i] , value[i] , weight[i] ) )
return menu
def greedy( items , max_cost , key_func ) -> Dict:
items_copy = sorted(items , key=key_func , reverse=True )
result = []
total_value, total_cost = 0.0, 0.0
for i in range(len(items_copy ) ):
if (total_cost + items_copy[i].get_weight()) <= max_cost:
result.append(items_copy[i] )
total_cost += items_copy[i].get_weight()
total_value += items_copy[i].get_value()
return (result, total_value)
def __snake_case ( ) -> str:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
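# Editor's usage sketch (values are illustrative; `Things.value_weight` is the
# value-per-weight key defined above):
#   foods = build_menu(["Burger", "Pizza", "Coca Cola"], [80, 100, 60], [40, 60, 40])
#   items, total_value = greedy(foods, 60.0, Things.value_weight)
#   # Burger has the best ratio (80/40 = 2.0) and fits the 60-unit budget;
#   # neither Pizza (60) nor Coca Cola (40) fits afterwards, so the result
#   # is ([Burger], 80.0).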
| 100 | 0 |
'''simple docstring'''
import argparse
import glob
import logging
import os
import time
from argparse import Namespace
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from torch.utils.data import DataLoader, TensorDataset
from transformers import glue_compute_metrics as compute_metrics
from transformers import glue_convert_examples_to_features as convert_examples_to_features
from transformers import glue_output_modes, glue_tasks_num_labels
from transformers import glue_processors as processors
logger = logging.getLogger(__name__)
class GLUETransformer(BaseTransformer ):
"""simple docstring"""
mode = 'sequence-classification'
def __init__( self , hparams ):
"""simple docstring"""
if type(hparams ) == dict:
hparams = Namespace(**hparams )
hparams.glue_output_mode = glue_output_modes[hparams.task]
num_labels = glue_tasks_num_labels[hparams.task]
super().__init__(hparams , num_labels , self.mode )
def forward( self , **inputs ):
"""simple docstring"""
return self.model(**inputs )
def training_step( self , batch , batch_idx ):
"""simple docstring"""
inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
outputs = self(**inputs )
loss = outputs[0]
lr_scheduler = self.trainer.lr_schedulers[0]["""scheduler"""]
tensorboard_logs = {"""loss""": loss, """rate""": lr_scheduler.get_last_lr()[-1]}
return {"loss": loss, "log": tensorboard_logs}
def prepare_data( self ):
"""simple docstring"""
args = self.hparams
processor = processors[args.task]()
self.labels = processor.get_labels()
for mode in ["train", "dev"]:
cached_features_file = self._feature_file(mode )
if os.path.exists(cached_features_file ) and not args.overwrite_cache:
logger.info("""Loading features from cached file %s""" , cached_features_file )
else:
logger.info("""Creating features from dataset file at %s""" , args.data_dir )
examples = (
processor.get_dev_examples(args.data_dir )
if mode == """dev"""
else processor.get_train_examples(args.data_dir )
)
features = convert_examples_to_features(
examples , self.tokenizer , max_length=args.max_seq_length , label_list=self.labels , output_mode=args.glue_output_mode , )
logger.info("""Saving features into cached file %s""" , cached_features_file )
torch.save(features , cached_features_file )
def get_dataloader( self , mode , batch_size , shuffle = False ):
"""simple docstring"""
mode = """dev""" if mode == """test""" else mode
cached_features_file = self._feature_file(mode )
logger.info("""Loading features from cached file %s""" , cached_features_file )
features = torch.load(cached_features_file )
all_input_ids = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
all_attention_mask = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
all_token_type_ids = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
if self.hparams.glue_output_mode == "classification":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.long )
elif self.hparams.glue_output_mode == "regression":
all_labels = torch.tensor([f.label for f in features] , dtype=torch.float )
return DataLoader(
TensorDataset(all_input_ids , all_attention_mask , all_token_type_ids , all_labels ) , batch_size=batch_size , shuffle=shuffle , )
def validation_step( self , batch , batch_idx ):
"""simple docstring"""
inputs = {"""input_ids""": batch[0], """attention_mask""": batch[1], """labels""": batch[3]}
if self.config.model_type not in ["distilbert", "bart"]:
inputs["""token_type_ids"""] = batch[2] if self.config.model_type in ["""bert""", """xlnet""", """albert"""] else None
outputs = self(**inputs )
tmp_eval_loss, logits = outputs[:2]
preds = logits.detach().cpu().numpy()
out_label_ids = inputs["""labels"""].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _eval_end( self , outputs ):
"""simple docstring"""
val_loss_mean = torch.stack([x["""val_loss"""] for x in outputs] ).mean().detach().cpu().item()
preds = np.concatenate([x["""pred"""] for x in outputs] , axis=0 )
if self.hparams.glue_output_mode == "classification":
preds = np.argmax(preds , axis=1 )
elif self.hparams.glue_output_mode == "regression":
preds = np.squeeze(preds )
out_label_ids = np.concatenate([x["""target"""] for x in outputs] , axis=0 )
out_label_list = [[] for _ in range(out_label_ids.shape[0] )]
preds_list = [[] for _ in range(out_label_ids.shape[0] )]
results = {**{"""val_loss""": val_loss_mean}, **compute_metrics(self.hparams.task , preds , out_label_ids )}
ret = dict(results.items() )
ret["""log"""] = results
return ret, preds_list, out_label_list
    def validation_epoch_end(self, outputs):
        """Collect validation metrics for logging at the end of the epoch."""
        ret, preds, targets = self._eval_end(outputs)
        logs = ret["log"]
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    def test_epoch_end(self, outputs):
        """Collect test metrics for logging at the end of the test run."""
        ret, predictions, targets = self._eval_end(outputs)
        logs = ret["log"]
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        """Add GLUE-specific command line arguments on top of the shared ones."""
        BaseTransformer.add_model_specific_args(parser, root_dir)
        parser.add_argument(
            "--max_seq_length",
            default=128,
            type=int,
            help=(
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            ),
        )
        parser.add_argument("--task", default="", type=str, required=True, help="The GLUE task to run")
        parser.add_argument(
            "--gpus", default=0, type=int, help="The number of GPUs allocated for this, it is by default 0 meaning none"
        )
        parser.add_argument(
            "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
        )
        return parser
def main():
    parser = argparse.ArgumentParser()
    add_generic_args(parser, os.getcwd())
    parser = GLUETransformer.add_model_specific_args(parser, os.getcwd())
    args = parser.parse_args()

    # If output_dir not provided, a folder will be generated in pwd
    if args.output_dir is None:
        args.output_dir = os.path.join(
            "./results",
            f"{args.task}_{time.strftime('%Y%m%d_%H%M%S')}",
        )
        os.makedirs(args.output_dir)
    model = GLUETransformer(args)
    trainer = generic_train(model, args)

    # Optionally, predict on dev set and write to output_dir
    if args.do_predict:
        checkpoints = sorted(glob.glob(os.path.join(args.output_dir, "checkpoint-epoch=*.ckpt"), recursive=True))
        model = model.load_from_checkpoint(checkpoints[-1])
        return trainer.test(model)
if __name__ == "__main__":
main()
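
# Example invocation (illustrative; `--model_name_or_path`, `--data_dir`, `--output_dir`,
# and `--do_predict` are assumed to come from the shared generic-argument helpers):
#   python run_pl_glue.py --model_name_or_path bert-base-cased --task mrpc \
#       --data_dir ./glue_data/MRPC --max_seq_length 128 --gpus 1 --do_predict \
#       --output_dir ./results/mrpc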
| 716 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_blip_2''': [
'''BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Blip2Config''',
'''Blip2QFormerConfig''',
'''Blip2VisionConfig''',
],
'''processing_blip_2''': ['''Blip2Processor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blip_2"] = [
'''BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Blip2Model''',
'''Blip2QFormerModel''',
'''Blip2PreTrainedModel''',
'''Blip2ForConditionalGeneration''',
'''Blip2VisionModel''',
]
if TYPE_CHECKING:
    from .configuration_blip_2 import (
        BLIP_2_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Blip2Config,
        Blip2QFormerConfig,
        Blip2VisionConfig,
    )
    from .processing_blip_2 import Blip2Processor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_blip_2 import (
            BLIP_2_PRETRAINED_MODEL_ARCHIVE_LIST,
            Blip2ForConditionalGeneration,
            Blip2Model,
            Blip2PreTrainedModel,
            Blip2QFormerModel,
            Blip2VisionModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
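
# With the lazy module in place, importing this package stays cheap: the heavy
# torch-dependent modules above are only imported the first time one of their
# symbols (e.g. Blip2ForConditionalGeneration) is actually accessed.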
| 519 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_speech_available, is_torch_available
_import_structure = {
'configuration_audio_spectrogram_transformer': [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ASTConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_audio_spectrogram_transformer"] = [
'AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ASTForAudioClassification',
'ASTModel',
'ASTPreTrainedModel',
]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_audio_spectrogram_transformer"] = ["ASTFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
ASTConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ASTForAudioClassification,
ASTModel,
ASTPreTrainedModel,
)
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 696 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
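
# The script reads a GITHUB_TOKEN environment variable; the token is assumed to
# have permission to read, comment on, label, and close issues in the repository.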
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Closes the issue after 7 days of inactivity since the Stalebot notification.
issue.edit(state='''closed''' )
elif (
"stale" in issue.get_labels()
and last_comment is not None
and last_comment.user.login != "github-actions[bot]"
):
# Opens the issue if someone other than Stalebot commented.
issue.edit(state='''open''' )
issue.remove_from_labels('''stale''' )
elif (
(dt.utcnow() - issue.updated_at).days > 23
and (dt.utcnow() - issue.created_at).days >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Post a Stalebot notification after 23 days of inactivity.
issue.create_comment(
'''This issue has been automatically marked as stale because it has not had '''
'''recent activity. If you think this still needs to be addressed '''
'''please comment on this thread.\n\nPlease note that issues that do not follow the '''
'''[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) '''
'''are likely to be ignored.''' )
issue.add_to_labels('''stale''' )
if __name__ == "__main__":
main()
| 696 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'camembert-base': 'https://huggingface.co/camembert-base/resolve/main/config.json',
'umberto-commoncrawl-cased-v1': (
'https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json'
),
'umberto-wikipedia-uncased-v1': (
'https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json'
),
}
class CamembertConfig(PretrainedConfig):
    """Configuration class to store the configuration of a CamemBERT model."""

    model_type = "camembert"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
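
# Minimal sketch (illustrative hyperparameters): a tiny config for unit tests.
#   config = CamembertConfig(vocab_size=1000, hidden_size=128, num_hidden_layers=2,
#                            num_attention_heads=2, intermediate_size=256)
#   config.save_pretrained("./tiny-camembert-config")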

class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 709 |
import os
from typing import Dict, List, Tuple, TypeVar, Union
T = TypeVar("T")

ListLike = Union[List[T], Tuple[T, ...]]
NestedDataStructureLike = Union[T, List[T], Dict[str, T]]
PathLike = Union[str, bytes, os.PathLike]
| 473 | 0 |
"""simple docstring"""
import fire
from transformers import AutoConfig, AutoModelForSeq2SeqLM, AutoTokenizer


def save_randomly_initialized_version(config_name: str, save_dir: str, **config_kwargs):
    """Build a randomly initialized model from a pretrained config and save it, plus its tokenizer."""
    config = AutoConfig.from_pretrained(config_name, **config_kwargs)
    model = AutoModelForSeq2SeqLM.from_config(config)
    model.save_pretrained(save_dir)
    AutoTokenizer.from_pretrained(config_name).save_pretrained(save_dir)
    return model
if __name__ == "__main__":
fire.Fire(save_randomly_initialized_version)
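
# Example invocation via python-fire (illustrative names; `d_model` is a T5 config field):
#   python save_randomly_initialized.py t5-small ./tiny-random-t5 --d_model=64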
| 19 |
"""simple docstring"""
def speed_of_sound_in_a_fluid(density: float, bulk_modulus: float) -> float:
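    """Return the speed of sound in a fluid from its density and bulk modulus,
    using the Newton-Laplace equation c = sqrt(K / rho).

    Illustrative values: water (density ~998 kg/m^3, bulk modulus ~2.15e9 Pa)
    gives roughly 1.5e3 m/s.
    """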
if density <= 0:
raise ValueError('Impossible fluid density' )
if bulk_modulus <= 0:
raise ValueError('Impossible bulk modulus' )
return (bulk_modulus / density) ** 0.5
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549 | 0 |
'''simple docstring'''
def and_gate(input_1: int, input_2: int) -> int:
    return int((input_1, input_2).count(0) == 0)


def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 719 |
'''simple docstring'''
def solution() -> int:
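    """Return d_1 * d_10 * d_100 * d_1000 * d_10000 * d_100000 * d_1000000 for the
    digits of Champernowne's constant 0.123456789101112... (Project Euler, problem 40).
    """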
    constant = []
    i = 1

    while len(constant) < 1e6:
        constant.append(str(i))
        i += 1

    constant = "".join(constant)

    return (
        int(constant[0])
        * int(constant[9])
        * int(constant[99])
        * int(constant[999])
        * int(constant[9999])
        * int(constant[99999])
        * int(constant[999999])
    )
if __name__ == "__main__":
print(solution())
| 653 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !
        output = model(input_ids)["last_hidden_state"]

        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
| 1 |
'''simple docstring'''
import argparse
from transformers import BigBirdConfig, BigBirdForPreTraining, BigBirdForQuestionAnswering, load_tf_weights_in_big_bird
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, big_bird_config_file, pytorch_dump_path, is_trivia_qa):
    # Initialise PyTorch model
    config = BigBirdConfig.from_json_file(big_bird_config_file)
    print(f"Building PyTorch model from configuration: {config}")

    if is_trivia_qa:
        model = BigBirdForQuestionAnswering(config)
    else:
        model = BigBirdForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_big_bird(model, tf_checkpoint_path, is_trivia_qa=is_trivia_qa)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    model.save_pretrained(pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--big_bird_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained BERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_trivia_qa", action="store_true", help="Whether to convert a model with a trivia_qa head."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(
args.tf_checkpoint_path, args.big_bird_config_file, args.pytorch_dump_path, args.is_trivia_qa
)
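
# Example invocation (illustrative paths):
#   python convert_bigbird_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./bigbird/model.ckpt \
#       --big_bird_config_file ./bigbird/config.json \
#       --pytorch_dump_path ./bigbird-pytorch --is_trivia_qa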
| 48 | 0 |
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class GitProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor, tokenizer):
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "attention_mask", "pixel_values"]

| 709 |

from typing import Callable, Dict, Optional, Tuple
import torch
from torch import nn
from torch.distributions import (
AffineTransform,
Distribution,
Independent,
NegativeBinomial,
Normal,
StudentT,
TransformedDistribution,
)
class AffineTransformed(TransformedDistribution):
    def __init__(self, base_distribution: Distribution, loc=None, scale=None, event_dim=0):
        self.scale = 1.0 if scale is None else scale
        self.loc = 0.0 if loc is None else loc

        super().__init__(base_distribution, [AffineTransform(loc=self.loc, scale=self.scale, event_dim=event_dim)])

    @property
    def mean(self):
        """Mean of the distribution."""
        return self.base_dist.mean * self.scale + self.loc

    @property
    def variance(self):
        """Variance of the distribution."""
        return self.base_dist.variance * self.scale**2

    @property
    def stddev(self):
        """Standard deviation of the distribution."""
        return self.variance.sqrt()
class ParameterProjection(nn.Module):
    def __init__(self, in_features: int, args_dim: Dict[str, int], domain_map: Callable[..., Tuple[torch.Tensor]], **kwargs) -> None:
        super().__init__(**kwargs)
        self.args_dim = args_dim
        self.proj = nn.ModuleList([nn.Linear(in_features, dim) for dim in args_dim.values()])
        self.domain_map = domain_map

    def forward(self, x: torch.Tensor) -> Tuple[torch.Tensor]:
        params_unbounded = [proj(x) for proj in self.proj]

        return self.domain_map(*params_unbounded)
class LambdaLayer(nn.Module):
    def __init__(self, function):
        super().__init__()
        self.function = function

    def forward(self, x, *args):
        return self.function(x, *args)
class DistributionOutput:
    distribution_class: type
    in_features: int
    args_dim: Dict[str, int]

    def __init__(self, dim: int = 1) -> None:
        self.dim = dim
        self.args_dim = {k: dim * self.args_dim[k] for k in self.args_dim}

    def _base_distribution(self, distr_args):
        if self.dim == 1:
            return self.distribution_class(*distr_args)
        else:
            return Independent(self.distribution_class(*distr_args), 1)

    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        distr = self._base_distribution(distr_args)
        if loc is None and scale is None:
            return distr
        else:
            return AffineTransformed(distr, loc=loc, scale=scale, event_dim=self.event_dim)

    @property
    def event_shape(self) -> Tuple:
        """Shape of each individual event produced by the distributions this object constructs."""
        return () if self.dim == 1 else (self.dim,)

    @property
    def event_dim(self) -> int:
        """Number of event dimensions, i.e. the length of the `event_shape` tuple."""
        return len(self.event_shape)

    @property
    def value_in_support(self) -> float:
        """A value that is valid for computing the log-loss of the corresponding distribution."""
        return 0.0

    def get_parameter_projection(self, in_features: int) -> nn.Module:
        """Return the parameter projection layer mapping inputs to distribution parameters."""
        return ParameterProjection(in_features=in_features, args_dim=self.args_dim, domain_map=LambdaLayer(self.domain_map))

    def domain_map(self, *args: torch.Tensor):
        """Convert arguments to the right shape and domain."""
        raise NotImplementedError()

    @staticmethod
    def squareplus(x: torch.Tensor) -> torch.Tensor:
        """Map inputs smoothly to the positive orthant via (x + sqrt(x^2 + 4)) / 2."""
        return (x + torch.sqrt(torch.square(x) + 4.0)) / 2.0
class StudentTOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"df": 1, "loc": 1, "scale": 1}
    distribution_class: type = StudentT

    @classmethod
    def domain_map(cls, df: torch.Tensor, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        df = 2.0 + cls.squareplus(df)
        return df.squeeze(-1), loc.squeeze(-1), scale.squeeze(-1)
class NormalOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"loc": 1, "scale": 1}
    distribution_class: type = Normal

    @classmethod
    def domain_map(cls, loc: torch.Tensor, scale: torch.Tensor):
        scale = cls.squareplus(scale).clamp_min(torch.finfo(scale.dtype).eps)
        return loc.squeeze(-1), scale.squeeze(-1)
class NegativeBinomialOutput(DistributionOutput):
    args_dim: Dict[str, int] = {"total_count": 1, "logits": 1}
    distribution_class: type = NegativeBinomial

    @classmethod
    def domain_map(cls, total_count: torch.Tensor, logits: torch.Tensor):
        total_count = cls.squareplus(total_count)
        return total_count.squeeze(-1), logits.squeeze(-1)

    def _base_distribution(self, distr_args) -> Distribution:
        total_count, logits = distr_args
        if self.dim == 1:
            return self.distribution_class(total_count=total_count, logits=logits)
        else:
            return Independent(self.distribution_class(total_count=total_count, logits=logits), 1)

    # Cannot scale with the affine transformation since the negative binomial
    # returns integers; instead we scale the parameters.
    def distribution(self, distr_args, loc: Optional[torch.Tensor] = None, scale: Optional[torch.Tensor] = None) -> Distribution:
        total_count, logits = distr_args

        if scale is not None:
            # See scaling property of Gamma.
            logits += scale.log()

        return self._base_distribution((total_count, logits))

| 580 | 0 |
import os
import pickle
import unittest
from transformers import AutoTokenizer
from transformers.models.bert.tokenization_bert import BertTokenizer
from transformers.models.bert_japanese.tokenization_bert_japanese import (
VOCAB_FILES_NAMES,
BertJapaneseTokenizer,
CharacterTokenizer,
JumanppTokenizer,
MecabTokenizer,
SudachiTokenizer,
WordpieceTokenizer,
)
from transformers.testing_utils import custom_tokenizers, require_jumanpp, require_sudachi
from ...test_tokenization_common import TokenizerTesterMixin
@custom_tokenizers
class BertJapaneseTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
    space_between_special_tokens = True
    def setUp(self):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = [
"[UNK]",
"[CLS]",
"[SEP]",
"こんにちは",
"こん",
"にちは",
"ばんは",
"##こん",
"##にちは",
"##ばんは",
"世界",
"##世界",
"、",
"##、",
"。",
"##。",
]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こんにちは 、 世界 。 こんばんは 、 世界 。"
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids
def __A ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : Optional[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : str ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file )
UpperCAmelCase_ = tokenizer.tokenize("こんにちは、世界。\nこんばんは、世界。" )
self.assertListEqual(lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="mecab" )
self.assertIsNotNone(lowerCAmelCase )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase , "wb" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(lowerCAmelCase )
UpperCAmelCase_ = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = MecabTokenizer(mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : List[str] ):
'''simple docstring'''
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic="unidic_lite" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : int ):
'''simple docstring'''
try:
UpperCAmelCase_ = MecabTokenizer(mecab_dic="unidic" )
except ModuleNotFoundError:
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : int ):
'''simple docstring'''
UpperCAmelCase_ = MecabTokenizer(do_lower_case=lowerCAmelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iphone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
def __A ( self : List[Any] ):
'''simple docstring'''
try:
UpperCAmelCase_ = MecabTokenizer(
do_lower_case=lowerCAmelCase , normalize_text=lowerCAmelCase , mecab_option="-d /usr/local/lib/mecab/dic/jumandic" )
except RuntimeError:
# if dict doesn't exist in the system, previous code raises this error.
return
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "\u3000", "。"] , )
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = MecabTokenizer(normalize_text=lowerCAmelCase , mecab_dic="ipadic" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップルストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", " ", "。"] , )
@require_sudachi
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="sudachi" )
self.assertIsNotNone(lowerCAmelCase )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase , "wb" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(lowerCAmelCase )
UpperCAmelCase_ = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@require_sudachi
def __A ( self : List[Any] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="A" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国", "人", "参政", "権"] )
@require_sudachi
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="B" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人", "参政権"] )
@require_sudachi
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(sudachi_dict_type="core" , sudachi_split_mode="C" )
self.assertListEqual(tokenizer.tokenize("外国人参政権" ) , ["外国人参政権"] )
@require_sudachi
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(do_lower_case=lowerCAmelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iphone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", " ", "。", " ", " "] , )
@require_sudachi
def __A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(normalize_text=lowerCAmelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , [" ", "\t", "アップル", "ストア", "で", "iPhone", "8", " ", "が", " ", " ", "\n ", "発売", "さ", "れ", "た", "\u3000", "。", " ", " "] , )
@require_sudachi
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = SudachiTokenizer(trim_whitespace=lowerCAmelCase , sudachi_dict_type="core" )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れ", "た", "。"] , )
@require_jumanpp
def __A ( self : Optional[Any] ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , word_tokenizer_type="jumanpp" )
self.assertIsNotNone(lowerCAmelCase )
UpperCAmelCase_ = "こんにちは、世界。\nこんばんは、世界。"
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , ["こんにちは", "、", "世界", "。", "こん", "##ばんは", "、", "世界", "。"] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 12, 10, 14, 4, 9, 12, 10, 14] )
UpperCAmelCase_ = os.path.join(self.tmpdirname , "tokenizer.bin" )
with open(lowerCAmelCase , "wb" ) as handle:
pickle.dump(lowerCAmelCase , lowerCAmelCase )
with open(lowerCAmelCase , "rb" ) as handle:
UpperCAmelCase_ = pickle.load(lowerCAmelCase )
UpperCAmelCase_ = tokenizer_new.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@require_jumanpp
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase_ = JumanppTokenizer(do_lower_case=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iphone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : Any ):
'''simple docstring'''
UpperCAmelCase_ = JumanppTokenizer(normalize_text=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["ア", "ッ", "フ", "゚", "ル", "ストア", "で", "iPhone", "8", "\u3000", "が", "\u3000", "\u3000", "\u3000", "発売", "さ", "れた", "\u3000", "。"] , )
@require_jumanpp
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = JumanppTokenizer(trim_whitespace=lowerCAmelCase )
self.assertListEqual(
tokenizer.tokenize(" \tアップルストアでiPhone8 が \n 発売された 。 " ) , ["アップル", "ストア", "で", "iPhone", "8", "が", "発売", "さ", "れた", "。"] , )
@require_jumanpp
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = JumanppTokenizer()
self.assertListEqual(
tokenizer.tokenize("ありがとうございますm(_ _)m見つけるのが大変です。" ) , ["ありがとう", "ございます", "m(_ _)m", "見つける", "の", "が", "大変です", "。"] , )
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こんにちは", "こん", "にちは", "ばんは", "##こん", "##にちは", "##ばんは"]
UpperCAmelCase_ = {}
for i, token in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = i
UpperCAmelCase_ = WordpieceTokenizer(vocab=lowerCAmelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こんにちは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは" ) , ["こん", "##ばんは"] )
self.assertListEqual(tokenizer.tokenize("こんばんは こんばんにちは こんにちは" ) , ["こん", "##ばんは", "[UNK]", "こんにちは"] )
def __A ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase_ = BertJapaneseTokenizer.from_pretrained("nlp-waseda/roberta-base-japanese-with-auto-jumanpp" )
UpperCAmelCase_ = tokenizer.subword_tokenizer
UpperCAmelCase_ = subword_tokenizer.tokenize("国境 の 長い トンネル を 抜ける と 雪国 であった 。" )
self.assertListEqual(lowerCAmelCase , ["▁国境", "▁の", "▁長い", "▁トンネル", "▁を", "▁抜ける", "▁と", "▁雪", "国", "▁であった", "▁。"] )
UpperCAmelCase_ = subword_tokenizer.tokenize("こんばんは こんばん にち は こんにちは" )
self.assertListEqual(lowerCAmelCase , ["▁こん", "ばん", "は", "▁こん", "ばん", "▁に", "ち", "▁は", "▁こんにちは"] )
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese" )
UpperCAmelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class BertJapaneseCharacterTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BertJapaneseTokenizer
    test_rust_tokenizer = False
def __A ( self : List[Any] ):
'''simple docstring'''
super().setUp()
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
    def get_tokenizer(self, **kwargs):
        return BertJapaneseTokenizer.from_pretrained(self.tmpdirname, subword_tokenizer_type="character", **kwargs)
    def get_input_output_texts(self, tokenizer):
        input_text = "こんにちは、世界。 \nこんばんは、世界。"
        output_text = "こ ん に ち は 、 世 界 。 こ ん ば ん は 、 世 界 。"
        return input_text, output_text
def __A ( self : Any ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : List[Any] ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : Dict ):
'''simple docstring'''
pass # TODO add if relevant
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class(self.vocab_file , subword_tokenizer_type="character" )
UpperCAmelCase_ = tokenizer.tokenize("こんにちは、世界。 \nこんばんは、世界。" )
self.assertListEqual(
lowerCAmelCase , ["こ", "ん", "に", "ち", "は", "、", "世", "界", "。", "こ", "ん", "ば", "ん", "は", "、", "世", "界", "。"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [3, 4, 5, 6, 7, 11, 9, 10, 12, 3, 4, 8, 4, 7, 11, 9, 10, 12] )
def __A ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase_ = ["[UNK]", "[CLS]", "[SEP]", "こ", "ん", "に", "ち", "は", "ば", "世", "界", "、", "。"]
UpperCAmelCase_ = {}
for i, token in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = i
UpperCAmelCase_ = CharacterTokenizer(vocab=lowerCAmelCase , unk_token="[UNK]" )
self.assertListEqual(tokenizer.tokenize("" ) , [] )
self.assertListEqual(tokenizer.tokenize("こんにちは" ) , ["こ", "ん", "に", "ち", "は"] )
self.assertListEqual(tokenizer.tokenize("こんにちほ" ) , ["こ", "ん", "に", "ち", "[UNK]"] )
def __A ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase_ = self.tokenizer_class.from_pretrained("cl-tohoku/bert-base-japanese-char" )
UpperCAmelCase_ = tokenizer.encode("ありがとう。" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode("どういたしまして。" , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase )
UpperCAmelCase_ = tokenizer.build_inputs_with_special_tokens(lowerCAmelCase , lowerCAmelCase )
# 2 is for "[CLS]", 3 is for "[SEP]"
assert encoded_sentence == [2] + text + [3]
assert encoded_pair == [2] + text + [3] + text_a + [3]
@custom_tokenizers
class AutoTokenizerCustomTest(unittest.TestCase):
def __A ( self : str ):
'''simple docstring'''
UpperCAmelCase_ = "cl-tohoku/bert-base-japanese"
UpperCAmelCase_ = AutoTokenizer.from_pretrained(lowerCAmelCase )
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
class BertTokenizerMismatchTest(unittest.TestCase):
def __A ( self : Dict ):
'''simple docstring'''
UpperCAmelCase_ = "cl-tohoku/bert-base-japanese"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertTokenizer.from_pretrained(lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) )
UpperCAmelCase_ = "bert-base-cased"
with self.assertLogs("transformers" , level="WARNING" ) as cm:
BertJapaneseTokenizer.from_pretrained(lowerCAmelCase )
self.assertTrue(
cm.records[0].message.startswith(
"The tokenizer class you load from this checkpoint is not the same type as the class this function"
" is called from." ) ) | 162 |
from __future__ import annotations
import pandas as pd
def calculate_waitingtime(arrival_time, burst_time, no_of_processes):
    remaining_time = [0] * no_of_processes
    waiting_time = [0] * no_of_processes
    # Copy the burst time into remaining_time[]
    for i in range(no_of_processes):
        remaining_time[i] = burst_time[i]

    complete = 0
    increment_time = 0
    minm = 999999999
    short = 0
    check = False

    # Process until all processes are completed
    while complete != no_of_processes:
        for j in range(no_of_processes):
            if arrival_time[j] <= increment_time and remaining_time[j] > 0:
                if remaining_time[j] < minm:
                    minm = remaining_time[j]
                    short = j
                    check = True

        if not check:
            increment_time += 1
            continue
        remaining_time[short] -= 1

        minm = remaining_time[short]
        if minm == 0:
            minm = 999999999

        if remaining_time[short] == 0:
            complete += 1
            check = False

            # Find finish time of current process
            finish_time = increment_time + 1

            # Calculate waiting time
            finar = finish_time - arrival_time[short]
            waiting_time[short] = finar - burst_time[short]

            if waiting_time[short] < 0:
                waiting_time[short] = 0

        # Increment time
        increment_time += 1
    return waiting_time
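
# Shortest Remaining Time First (SRTF) is the preemptive variant of shortest-job-first:
# at every time unit the arrived process with the least remaining burst time runs,
# so a newly arrived shorter job preempts the currently running one.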
def calculate_turnaroundtime(burst_time, no_of_processes, waiting_time):
    turn_around_time = [0] * no_of_processes
    for i in range(no_of_processes):
        turn_around_time[i] = burst_time[i] + waiting_time[i]
    return turn_around_time


def calculate_average_times(waiting_time, turn_around_time, no_of_processes):
    total_waiting_time = 0
    total_turn_around_time = 0
    for i in range(no_of_processes):
        total_waiting_time = total_waiting_time + waiting_time[i]
        total_turn_around_time = total_turn_around_time + turn_around_time[i]
    print(f"Average waiting time = {total_waiting_time / no_of_processes:.5f}")
    print("Average turn around time =", total_turn_around_time / no_of_processes)
if __name__ == "__main__":
print("""Enter how many process you want to analyze""")
_a: str = int(input())
_a: List[str] = [0] * no_of_processes
_a: Dict = [0] * no_of_processes
_a: Optional[int] = list(range(1, no_of_processes + 1))
for i in range(no_of_processes):
print("""Enter the arrival time and burst time for process:--""" + str(i + 1))
_a , _a: List[str] = map(int, input().split())
_a: List[str] = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
_a: Optional[int] = burst_time
_a: List[Any] = no_of_processes
_a: List[Any] = waiting_time
_a: List[Any] = calculate_turnaroundtime(bt, n, wt)
calculate_average_times(waiting_time, turn_around_time, no_of_processes)
_a: Any = pd.DataFrame(
list(zip(processes, burst_time, arrival_time, waiting_time, turn_around_time)),
columns=[
"""Process""",
"""BurstTime""",
"""ArrivalTime""",
"""WaitingTime""",
"""TurnAroundTime""",
],
)
# Printing the dataFrame
pd.set_option("""display.max_rows""", fcfs.shape[0] + 1)
print(fcfs) | 162 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_camembert import CamembertTokenizer
else:
    CamembertTokenizer = None


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/sentencepiece.bpe.model",
    },
    "tokenizer_file": {
        "camembert-base": "https://huggingface.co/camembert-base/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "camembert-base": 512,
}

SPIECE_UNDERLINE = "▁"
class CamembertTokenizerFast(PreTrainedTokenizerFast):
    """A "fast" CamemBERT tokenizer backed by HuggingFace's *tokenizers* library."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = CamembertTokenizer
    def __init__(self, vocab_file=None, tokenizer_file=None, bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", additional_special_tokens=["<s>NOTUSED", "</s>NOTUSED"], **kwargs):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(vocab_file, tokenizer_file=tokenizer_file, bos_token=bos_token, eos_token=eos_token, sep_token=sep_token, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, additional_special_tokens=additional_special_tokens, **kwargs)

        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def save_vocabulary(self, save_directory, filename_prefix=None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )

        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
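
# Minimal usage sketch (assumes the "camembert-base" checkpoint is reachable on the Hub):
#   tokenizer = CamembertTokenizerFast.from_pretrained("camembert-base")
#   tokenizer("J'aime le camembert !")["input_ids"]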
| 707 |
'''simple docstring'''
from urllib.parse import quote
import pytest
from datasets.utils.hub import hf_hub_url
@pytest.mark.parametrize('''repo_id''' , ['''canonical_dataset_name''', '''org-name/dataset-name'''] )
@pytest.mark.parametrize('''path''' , ['''filename.csv''', '''filename with blanks.csv'''] )
@pytest.mark.parametrize('''revision''' , [None, '''v2'''] )
def test_hf_hub_url(repo_id, path, revision):
    url = hf_hub_url(repo_id=repo_id, path=path, revision=revision)
    assert url == f"https://huggingface.co/datasets/{repo_id}/resolve/{revision or 'main'}/{quote(path)}"
| 694 | 0 |
'''simple docstring'''
from __future__ import annotations
def kmp(pattern: str, text: str) -> bool:
    # 1) Construct the failure array for the pattern
    failure = get_failure_array(pattern)

    # 2) Step through text searching for pattern
    i, j = 0, 0  # index into text, pattern
    while i < len(text):
        if pattern[j] == text[i]:
            if j == (len(pattern) - 1):
                return True
            j += 1

        # if this is a prefix in our pattern
        # just go back far enough to continue
        elif j > 0:
            j = failure[j - 1]
            continue
        i += 1
    return False


def get_failure_array(pattern: str) -> list[int]:
    failure = [0]
    i = 0
    j = 1
    while j < len(pattern):
        if pattern[i] == pattern[j]:
            i += 1
        elif i > 0:
            i = failure[i - 1]
            continue
        j += 1
        failure.append(i)
    return failure
if __name__ == "__main__":
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert kmp(pattern, text1) and not kmp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert kmp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert kmp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert kmp(pattern, text)

    # Test 5)
    pattern = "aabaabaaa"
    assert get_failure_array(pattern) == [0, 1, 0, 1, 2, 3, 4, 5, 2]
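
    # The failure (prefix) table is what makes the search linear: the whole
    # algorithm runs in O(len(text) + len(pattern)) instead of O(n * m).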
| 358 |
'''simple docstring'''
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
| 358 | 1 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
def hashimage(image: Image) -> str:
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]
def mask_to_test_readable(mask: Image) -> Dict:
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else [])
    )
    def get_test_pipeline(self, model, tokenizer, processor):
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
            "./tests/fixtures/tests_samples/COCO/000000039769.png",
        ]
    def run_pipeline_test(self, mask_generator, examples):
        pass
@require_tf
@unittest.skip('Image segmentation not implemented in TF' )
def __UpperCAmelCase ( self : List[Any] ):
'''simple docstring'''
pass
    @slow
    @require_torch
    def test_small_model_pt(self):
        image_segmenter = pipeline("mask-generation", model="facebook/sam-vit-huge")

        outputs = image_segmenter("http://images.cocodataset.org/val2017/000000039769.jpg", points_per_batch=256)
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.021},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
{'mask': {'hash': 'e2d0b7a0b7', 'shape': (4_80, 6_40)}, 'scores': 0.9967},
{'mask': {'hash': '453c7844bd', 'shape': (4_80, 6_40)}, 'scores': 0.993},
{'mask': {'hash': '3d44f2926d', 'shape': (4_80, 6_40)}, 'scores': 0.9909},
{'mask': {'hash': '64033ddc3f', 'shape': (4_80, 6_40)}, 'scores': 0.9879},
{'mask': {'hash': '801064ff79', 'shape': (4_80, 6_40)}, 'scores': 0.9834},
{'mask': {'hash': '6172f276ef', 'shape': (4_80, 6_40)}, 'scores': 0.9716},
{'mask': {'hash': 'b49e60e084', 'shape': (4_80, 6_40)}, 'scores': 0.9612},
{'mask': {'hash': 'a811e775fd', 'shape': (4_80, 6_40)}, 'scores': 0.9599},
{'mask': {'hash': 'a6a8ebcf4b', 'shape': (4_80, 6_40)}, 'scores': 0.9552},
{'mask': {'hash': '9d8257e080', 'shape': (4_80, 6_40)}, 'scores': 0.9532},
{'mask': {'hash': '32de6454a8', 'shape': (4_80, 6_40)}, 'scores': 0.9516},
{'mask': {'hash': 'af3d4af2c8', 'shape': (4_80, 6_40)}, 'scores': 0.9499},
{'mask': {'hash': '3c6db475fb', 'shape': (4_80, 6_40)}, 'scores': 0.9483},
{'mask': {'hash': 'c290813fb9', 'shape': (4_80, 6_40)}, 'scores': 0.9464},
{'mask': {'hash': 'b6f0b8f606', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': '92ce16bfdf', 'shape': (4_80, 6_40)}, 'scores': 0.943},
{'mask': {'hash': 'c749b25868', 'shape': (4_80, 6_40)}, 'scores': 0.9408},
{'mask': {'hash': 'efb6cab859', 'shape': (4_80, 6_40)}, 'scores': 0.9335},
{'mask': {'hash': '1ff2eafb30', 'shape': (4_80, 6_40)}, 'scores': 0.9326},
{'mask': {'hash': '788b798e24', 'shape': (4_80, 6_40)}, 'scores': 0.9262},
{'mask': {'hash': 'abea804f0e', 'shape': (4_80, 6_40)}, 'scores': 0.8999},
{'mask': {'hash': '7b9e8ddb73', 'shape': (4_80, 6_40)}, 'scores': 0.8986},
{'mask': {'hash': 'cd24047c8a', 'shape': (4_80, 6_40)}, 'scores': 0.8984},
{'mask': {'hash': '6943e6bcbd', 'shape': (4_80, 6_40)}, 'scores': 0.8873},
{'mask': {'hash': 'b5f47c9191', 'shape': (4_80, 6_40)}, 'scores': 0.8871}
] , )
# fmt: on
    @require_torch
    @slow
    def test_threshold(self):
        model_id = "facebook/sam-vit-huge"
        image_segmenter = pipeline("mask-generation", model=model_id)

        outputs = image_segmenter(
            "http://images.cocodataset.org/val2017/000000039769.jpg", pred_iou_thresh=1, points_per_batch=256
        )
# Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["masks"]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{'mask': {'hash': '115ad19f5f', 'shape': (4_80, 6_40)}, 'scores': 1.0444},
{'mask': {'hash': '6affa964c6', 'shape': (4_80, 6_40)}, 'scores': 1.0210},
{'mask': {'hash': 'dfe28a0388', 'shape': (4_80, 6_40)}, 'scores': 1.0167},
{'mask': {'hash': 'c0a5f4a318', 'shape': (4_80, 6_40)}, 'scores': 1.0132},
{'mask': {'hash': 'fe8065c197', 'shape': (4_80, 6_40)}, 'scores': 1.0053},
] , )
| 720 |
import argparse
import logging
import os
from datetime import datetime
import numpy as np
import torch
from torch import nn
from torch.utils.data import DataLoader, RandomSampler, TensorDataset
from tqdm import tqdm
from transformers import GPT2LMHeadModel
logger = logging.getLogger(__name__)
def save_model(model, dirpath):
    # save results
    if os.path.exists(dirpath):
        if os.path.exists(os.path.join(dirpath, "config.json")) and os.path.isfile(
            os.path.join(dirpath, "config.json")
        ):
            os.remove(os.path.join(dirpath, "config.json"))
        if os.path.exists(os.path.join(dirpath, "pytorch_model.bin")) and os.path.isfile(
            os.path.join(dirpath, "pytorch_model.bin")
        ):
            os.remove(os.path.join(dirpath, "pytorch_model.bin"))
    else:
        os.makedirs(dirpath)
    model.save_pretrained(dirpath)
def entropy(p, unlogit=False):
    """Compute the entropy of a probability distribution along the last axis."""
    exponent = 2
    if unlogit:
        p = torch.pow(p, exponent)
    plogp = p * torch.log(p)
    plogp[p == 0] = 0
    return -plogp.sum(dim=-1)
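
# For a uniform attention row p_i = 1/n the entropy is log(n), its maximum; a head
# that attends to a single token has entropy 0, so low values flag "confident" heads.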
def print_2d_tensor(tensor):
    """Print a 2D tensor as a layer-by-head table."""
    logger.info("lv, h >\t" + "\t".join(f"{x + 1}" for x in range(len(tensor))))
    for row in range(len(tensor)):
        if tensor.dtype != torch.long:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:.5f}" for x in tensor[row].cpu().data))
        else:
            logger.info(f"layer {row + 1}:\t" + "\t".join(f"{x:d}" for x in tensor[row].cpu().data))
def compute_heads_importance(args, model, eval_dataloader, compute_entropy=True, compute_importance=True, head_mask=None, actually_pruned=False):
    """Compute head attention entropy and head importance scores (http://arxiv.org/abs/1905.10650)."""
    # Prepare our tensors
    n_layers, n_heads = model.config.num_hidden_layers, model.config.num_attention_heads
    head_importance = torch.zeros(n_layers, n_heads).to(args.device)
    attn_entropy = torch.zeros(n_layers, n_heads).to(args.device)

    if head_mask is None:
        head_mask = torch.ones(n_layers, n_heads).to(args.device)
    head_mask.requires_grad_(requires_grad=True)
    # If actually pruned attention multi-head, set head mask to None to avoid shape mismatch
    if actually_pruned:
        head_mask = None

    tot_tokens = 0.0
    total_loss = 0.0
    for step, inputs in enumerate(tqdm(eval_dataloader, desc="Iteration", disable=args.local_rank not in [-1, 0])):
        inputs = tuple(t.to(args.device) for t in inputs)
        (input_ids,) = inputs

        # Do a forward pass (not with torch.no_grad() since we need gradients for importance score - see below)
        outputs = model(input_ids, labels=input_ids, head_mask=head_mask)
        # (loss), lm_logits, presents, (all hidden_states), (attentions)
        loss, _, all_attentions = (
            outputs[0],
            outputs[1],
            outputs[-1],
        )  # Loss and logits are the first, attention the last
        loss.backward()  # Backpropagate to populate the gradients in the head mask
        total_loss += loss.detach().cpu().numpy()
        if compute_entropy:
            for layer, attn in enumerate(all_attentions):
                masked_entropy = entropy(attn.detach(), True)
                attn_entropy[layer] += masked_entropy.sum(-1).sum(0).sum(0).detach()

        if compute_importance:
            head_importance += head_mask.grad.abs().detach()
        tot_tokens += torch.ones_like(input_ids).float().detach().sum().data

    # Normalize
    attn_entropy /= tot_tokens
    head_importance /= tot_tokens
    # Layerwise importance normalization
    if not args.dont_normalize_importance_by_layer:
        exponent = 2
        norm_by_layer = torch.pow(torch.pow(head_importance, exponent).sum(-1), 1 / exponent)
        head_importance /= norm_by_layer.unsqueeze(-1) + 1e-20

    if not args.dont_normalize_global_importance:
        head_importance = (head_importance - head_importance.min()) / (head_importance.max() - head_importance.min())

    # Print matrices
    if compute_entropy:
        logger.info("Attention entropies")
        print_2d_tensor(attn_entropy)
    if compute_importance:
        logger.info("Head importance scores")
        print_2d_tensor(head_importance)
    logger.info("Head ranked by importance scores")
    head_ranks = torch.zeros(head_importance.numel(), dtype=torch.long, device=args.device)
    head_ranks[head_importance.view(-1).sort(descending=True)[1]] = torch.arange(
        head_importance.numel(), device=args.device
    )
    head_ranks = head_ranks.view_as(head_importance)
    print_2d_tensor(head_ranks)
    return attn_entropy, head_importance, total_loss
def mask_heads(args, model, eval_dataloader):
    """Iteratively mask the least important heads until the score drops below the threshold."""
    _, head_importance, loss = compute_heads_importance(args, model, eval_dataloader, compute_entropy=False)
    original_score = 1 / loss  # instead of downstream score use the LM loss
    logger.info("Pruning: original score: %f, threshold: %f", original_score, original_score * args.masking_threshold)

    new_head_mask = torch.ones_like(head_importance)
    num_to_mask = max(1, int(new_head_mask.numel() * args.masking_amount))

    current_score = original_score
    while current_score >= original_score * args.masking_threshold:
        head_mask = new_head_mask.clone().detach()  # save current head mask
        # heads from least important to most - keep only not-masked heads
        head_importance[head_mask == 0.0] = float("Inf")
        current_heads_to_mask = head_importance.view(-1).sort()[1]

        if len(current_heads_to_mask) <= num_to_mask:
            print("BREAK BY num_to_mask")
            break

        # mask heads
        current_heads_to_mask = current_heads_to_mask[:num_to_mask]
        logger.info("Heads to mask: %s", str(current_heads_to_mask.tolist()))
        new_head_mask = new_head_mask.view(-1)
        new_head_mask[current_heads_to_mask] = 0.0
        new_head_mask = new_head_mask.view_as(head_mask)
        new_head_mask = new_head_mask.clone().detach()
        print_2d_tensor(new_head_mask)

        # Compute metric and head importance again
        _, head_importance, loss = compute_heads_importance(
            args, model, eval_dataloader, compute_entropy=False, head_mask=new_head_mask
        )
        current_score = 1 / loss
        logger.info(
            "Masking: current score: %f, remaining heads %d (%.1f percents)",
            current_score,
            new_head_mask.sum(),
            new_head_mask.sum() / new_head_mask.numel() * 100,
        )

    logger.info("Final head mask")
    print_2d_tensor(head_mask)
    np.save(os.path.join(args.output_dir, "head_mask.npy"), head_mask.detach().cpu().numpy())

    return head_mask
def prune_heads(args, model, eval_dataloader, head_mask):
    # Pruning is like masking, but we actually remove the masked weights
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args, model, eval_dataloader, compute_entropy=False, compute_importance=False, head_mask=head_mask
    )
    score_masking = 1 / loss
    original_time = datetime.now() - before_time
    original_num_params = sum(p.numel() for p in model.parameters())
    heads_to_prune = {
        layer: (1 - head_mask[layer].long()).nonzero().squeeze().tolist() for layer in range(len(head_mask))
    }
    for k, v in heads_to_prune.items():
        if isinstance(v, int):
            heads_to_prune[k] = [
                v,
            ]
    assert sum(len(h) for h in heads_to_prune.values()) == (1 - head_mask.long()).sum().item()
    model.prune_heads(heads_to_prune)
    pruned_num_params = sum(p.numel() for p in model.parameters())
    before_time = datetime.now()
    _, _, loss = compute_heads_importance(
        args,
        model,
        eval_dataloader,
        compute_entropy=False,
        compute_importance=False,
        head_mask=None,
        actually_pruned=True,
    )
    score_pruning = 1 / loss
    new_time = datetime.now() - before_time
    logger.info(
        "Pruning: original num of params: %.2e, after pruning %.2e (%.1f percents)",
        original_num_params,
        pruned_num_params,
        pruned_num_params / original_num_params * 100,
    )
    logger.info("Pruning: score with masking: %f score with pruning: %f", score_masking, score_pruning)
    logger.info("Pruning: speed ratio (original timing / new timing): %f percents", original_time / new_time * 100)
    save_model(model, args.output_dir)
def main():
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models"
    )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the model predictions and checkpoints will be written."
    )
    # Other parameters
    parser.add_argument(
        "--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--tokenizer_name", default="", type=str, help="Pretrained tokenizer name or path if not the same as model_name_or_path"
    )
    parser.add_argument(
        "--cache_dir", default=None, type=str, help="Where do you want to store the pre-trained models downloaded from s3"
    )
    parser.add_argument(
        "--data_subset", type=int, default=-1, help="If > 0: limit the data to a subset of data_subset instances."
    )
    parser.add_argument(
        "--overwrite_output_dir", action="store_true", help="Whether to overwrite data in output directory"
    )
    parser.add_argument(
        "--overwrite_cache", action="store_true", help="Overwrite the cached training and evaluation sets"
    )
    parser.add_argument(
        "--dont_normalize_importance_by_layer", action="store_true", help="Don't normalize importance score by layers"
    )
    parser.add_argument(
        "--dont_normalize_global_importance", action="store_true", help="Don't normalize all importance scores between 0 and 1"
    )
    parser.add_argument(
        "--try_masking", action="store_true", help="Whether to try to mask heads until a threshold of accuracy."
    )
    parser.add_argument(
        "--masking_threshold", default=0.9, type=float, help="masking threshold in terms of metrics (stop masking when metric < threshold * original metric value)."
    )
    parser.add_argument(
        "--masking_amount", default=0.1, type=float, help="Amount of heads to mask at each masking step."
    )
    parser.add_argument("--metric_name", default="acc", type=str, help="Metric to use for head masking.")
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=(
            "The maximum total input sequence length after WordPiece tokenization. \n"
            "Sequences longer than this will be truncated, sequences shorter padded."
        ),
    )
    parser.add_argument("--batch_size", default=1, type=int, help="Batch size.")
    parser.add_argument("--seed", type=int, default=42)
    parser.add_argument("--local_rank", type=int, default=-1, help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda", action="store_true", help="Whether not to use CUDA when available")
    parser.add_argument("--server_ip", type=str, default="", help="Can be used for distant debugging.")
    parser.add_argument("--server_port", type=str, default="", help="Can be used for distant debugging.")
    args = parser.parse_args()
    if args.server_ip and args.server_port:
        # Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
        import ptvsd
        print("Waiting for debugger attach")
        ptvsd.enable_attach(address=(args.server_ip, args.server_port), redirect_output=True)
        ptvsd.wait_for_attach()
    # Setup devices and distributed training
    if args.local_rank == -1 or args.no_cuda:
        args.device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = 0 if args.no_cuda else torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        args.device = torch.device("cuda", args.local_rank)
        args.n_gpu = 1
        torch.distributed.init_process_group(backend="nccl")  # Initializes the distributed backend
    # Setup logging
    logging.basicConfig(level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)
    logger.info("device: {} n_gpu: {}, distributed: {}".format(args.device, args.n_gpu, bool(args.local_rank != -1)))
    model = GPT2LMHeadModel.from_pretrained(args.model_name_or_path)
    # Distributed and parallel training
    model.to(args.device)
    if args.local_rank != -1:
        model = nn.parallel.DistributedDataParallel(
            model, device_ids=[args.local_rank], output_device=args.local_rank, find_unused_parameters=True
        )
    elif args.n_gpu > 1:
        model = nn.DataParallel(model)
    # Print/save training arguments
    os.makedirs(args.output_dir, exist_ok=True)
    torch.save(args, os.path.join(args.output_dir, "run_args.bin"))
    logger.info("Training/evaluation parameters %s", args)
    # Prepare dataset
    numpy_data = np.concatenate(
        [
            np.loadtxt(args.data_dir, dtype=np.int64),
        ]
    )
    train_tensor_dataset = (torch.from_numpy(numpy_data),)
    train_data = TensorDataset(*train_tensor_dataset)
    train_sampler = RandomSampler(train_data)
    eval_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=args.batch_size)
    # Compute head entropy and importance score
    compute_heads_importance(args, model, eval_dataloader)
    # Try head masking (set heads to zero until the score goes under a threshold)
    # and head pruning (remove masked heads and see the effect on the network)
    if args.try_masking and args.masking_threshold > 0.0 and args.masking_threshold < 1.0:
        head_mask = mask_heads(args, model, eval_dataloader)
        prune_heads(args, model, eval_dataloader, head_mask)
if __name__ == "__main__":
    main()
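# Example invocation (illustrative only; the script file name and data path are assumptions):
#   python run_prune_gpt.py --model_name_or_path gpt2 --data_dir data/train_ids.txt \
#       --output_dir out/ --try_masking --masking_threshold 0.9 --masking_amount 0.1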
| 652 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """Minimal stand-in so the module still imports when vision deps are missing."""
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class VisualQuestionAnsweringPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
    def test_small_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            outputs, [{"score": ANY(float), "answer": ANY(str)}, {"score": ANY(float), "answer": ANY(str)}]
        )
@slow
@require_torch
    def test_large_model_pt(self):
        vqa_pipeline = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
        image = "./tests/fixtures/tests_samples/COCO/000000039769.png"
        question = "How many cats are there?"
        outputs = vqa_pipeline(image=image, question=question, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline({"image": image, "question": question}, top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]
        )
        outputs = vqa_pipeline(
            [{"image": image, "question": question}, {"image": image, "question": question}], top_k=2
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4),
            [[{"score": 0.8799, "answer": "2"}, {"score": 0.296, "answer": "1"}]] * 2,
        )
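    # Outside the test harness the same checkpoint is driven identically (illustrative; image path assumed):
    #   vqa = pipeline("visual-question-answering", model="dandelin/vilt-b32-finetuned-vqa")
    #   vqa(image="cats.png", question="How many cats are there?", top_k=2)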
@require_tf
@unittest.skip('Visual question answering not implemented in TF' )
    def test_small_model_tf(self):
pass | 57 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
_import_structure = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
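# Lazy import in practice (illustrative sketch): nothing heavy loads until the attribute is touched.
#   from transformers.models.byt5 import ByT5Tokenizer
#   tokenizer = ByT5Tokenizer()  # byte-level vocab, so no sentencepiece file is required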
| 92 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.bpe.model"}
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "moussaKam/mbarthez": "https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez": "https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model",
        "moussaKam/barthez-orangesum-title": (
            "https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model"
        ),
    },
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "moussaKam/mbarthez": 1024,
    "moussaKam/barthez": 1024,
    "moussaKam/barthez-orangesum-title": 1024,
}
SPIECE_UNDERLINE = "▁"
class BarthezTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.fairseq_tokens_to_ids = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
        self.fairseq_tokens_to_ids["<mask>"] = len(self.sp_model) - 1
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    @property
    def vocab_size(self):
        return len(self.sp_model)
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        return spm_id if spm_id else self.unk_token_id
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index)
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
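# Minimal usage sketch (assumes hub access; checkpoint name taken from the map above):
#   tokenizer = BarthezTokenizer.from_pretrained("moussaKam/barthez")
#   ids = tokenizer("Bonjour le monde")["input_ids"]
#   print(tokenizer.decode(ids, skip_special_tokens=True))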
| 700 |
class DisjointSet:
    def __init__(self, set_counts: list) -> None:
        """Initialize with a list of initial set sizes; each index starts as its own set."""
        self.set_counts = set_counts
        self.max_set = max(set_counts)
        num_sets = len(set_counts)
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets))
    def merge(self, src: int, dst: int) -> bool:
        """Union by rank; returns False if src and dst are already in the same set."""
        src_parent = self.get_parent(src)
        dst_parent = self.get_parent(dst)
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set, joined_set_size)
        return True
    def get_parent(self, disj_set: int) -> int:
        """Find the set representative, compressing the path along the way."""
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set])
        return self.parents[disj_set]
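# Usage sketch: three sets with counts [1, 1, 1]; merging 1 and 2 yields a set of size 2.
#   ds = DisjointSet([1, 1, 1])
#   ds.merge(1, 2)      # True
#   ds.get_parent(1)    # shared representative with 2
#   ds.max_set          # 2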
| 157 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class FlavaProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "FlavaImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
    def __call__(
        self,
        images: Optional[ImageInput] = None,
        text: Optional[Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = False,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_image_mask: Optional[bool] = None,
        return_codebook_pixels: Optional[bool] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")
        if text is not None:
            encoding = self.tokenizer(
                text=text,
                add_special_tokens=add_special_tokens,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                stride=stride,
                pad_to_multiple_of=pad_to_multiple_of,
                return_token_type_ids=return_token_type_ids,
                return_attention_mask=return_attention_mask,
                return_overflowing_tokens=return_overflowing_tokens,
                return_special_tokens_mask=return_special_tokens_mask,
                return_offsets_mapping=return_offsets_mapping,
                return_length=return_length,
                verbose=verbose,
                return_tensors=return_tensors,
                **kwargs,
            )
        if images is not None:
            image_features = self.image_processor(
                images,
                return_image_mask=return_image_mask,
                return_codebook_pixels=return_codebook_pixels,
                return_tensors=return_tensors,
                **kwargs,
            )
        if text is not None and images is not None:
            encoding.update(image_features)
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)
    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class
    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
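# Minimal usage sketch (checkpoint name assumed; illustrative only):
#   processor = FlavaProcessor.from_pretrained("facebook/flava-full")
#   batch = processor(text=["a photo of a cat"], images=pil_image, return_tensors="pt", padding=True)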
| 95 |
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])
    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(line) for line in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms
    def __len__(self):
        return len(self.data)
    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1
        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)
        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }
    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)
    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1
    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])
    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
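# Typical wiring (illustrative; assumes torch.utils.data.DataLoader):
#   loader = DataLoader(dataset, batch_size=8, collate_fn=collate_fn)
#   text, mask, img, img_start, img_end, labels = next(iter(loader))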
def get_mmimdb_labels():
    return [
        "Crime",
        "Drama",
        "Thriller",
        "Action",
        "Comedy",
        "Romance",
        "Documentary",
        "Short",
        "Mystery",
        "History",
        "Family",
        "Adventure",
        "Fantasy",
        "Sci-Fi",
        "Western",
        "Horror",
        "Sport",
        "War",
        "Music",
        "Musical",
        "Animation",
        "Biography",
        "Film-Noir",
    ]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
] ) | 352 | 0 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import is_flax_available
from transformers.testing_utils import require_flax
from ..test_modeling_flax_common import ids_tensor
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.generation import (
FlaxForcedBOSTokenLogitsProcessor,
FlaxForcedEOSTokenLogitsProcessor,
FlaxLogitsProcessorList,
FlaxMinLengthLogitsProcessor,
FlaxTemperatureLogitsWarper,
FlaxTopKLogitsWarper,
FlaxTopPLogitsWarper,
)
@require_flax
class LogitsProcessorTest(unittest.TestCase):
    def _get_uniform_logits(self, batch_size: int, length: int):
        scores = jnp.ones((batch_size, length)) / length
        return scores
    def test_temperature_dist_warper(self):
        input_ids = None
        length = 20
        scores = self._get_uniform_logits(batch_size=2, length=length)
        # tweak scores to not be uniform anymore
        scores = scores.at[1, 5].set((1 / length) + 0.1)  # peak, 1st batch
        scores = scores.at[1, 10].set((1 / length) - 0.4)  # valley, 1st batch
        # compute softmax
        probs = jax.nn.softmax(scores, axis=-1)
        temp_dist_warper_sharper = FlaxTemperatureLogitsWarper(temperature=0.5)
        temp_dist_warper_smoother = FlaxTemperatureLogitsWarper(temperature=1.3)
        warped_prob_sharp = jax.nn.softmax(temp_dist_warper_sharper(input_ids, scores.copy(), cur_len=None), axis=-1)
        warped_prob_smooth = jax.nn.softmax(temp_dist_warper_smoother(input_ids, scores.copy(), cur_len=None), axis=-1)
        # uniform distribution stays uniform
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_sharp[0, :], atol=1e-3))
        self.assertTrue(jnp.allclose(probs[0, :], warped_prob_smooth[0, :], atol=1e-3))
        # sharp peaks get higher, valleys get lower
        self.assertLess(probs[1, :].max(), warped_prob_sharp[1, :].max())
        self.assertGreater(probs[1, :].min(), warped_prob_sharp[1, :].min())
        # smooth peaks get lower, valleys get higher
        self.assertGreater(probs[1, :].max(), warped_prob_smooth[1, :].max())
        self.assertLess(probs[1, :].min(), warped_prob_smooth[1, :].min())
    def test_top_k_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create ramp distribution
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy()
        ramp_logits[1:, : vocab_size // 2] = ramp_logits[1:, : vocab_size // 2] + vocab_size
        top_k_warp = FlaxTopKLogitsWarper(3)
        scores = top_k_warp(input_ids, ramp_logits, cur_len=None)
        # check that correct tokens are filtered
        self.assertListEqual(jnp.isinf(scores[0]).tolist(), 7 * [True] + 3 * [False])
        self.assertListEqual(jnp.isinf(scores[1]).tolist(), 2 * [True] + 3 * [False] + 5 * [True])
        # check special case
        length = 5
        top_k_warp_safety_check = FlaxTopKLogitsWarper(top_k=1, filter_value=0.0, min_tokens_to_keep=3)
        ramp_logits = np.broadcast_to(np.arange(length)[None, :], (batch_size, length)).copy()
        scores = top_k_warp_safety_check(input_ids, ramp_logits, cur_len=None)
        # min_tokens overwrites k: 3 tokens are kept => 2 tokens are nullified
        self.assertListEqual((scores == 0.0).sum(axis=-1).tolist(), [2, 2])
    def test_top_p_dist_warper(self):
        input_ids = None
        vocab_size = 10
        batch_size = 2
        # create distribution and take log (inverse to Softmax as taken in TopPLogitsWarper)
        dist = np.log(np.array([[0.3, 0.1, 0.1, 0.5], [0.15, 0.3, 0.3, 0.25]]))
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        filtered_dist = np.exp(top_p_warp(input_ids, dist, cur_len=None))
        # dist should be filtered to keep min num values so that sum is >= top_p
        # exp (-inf) => 0
        EXPECTED_FILTERED_DIST = np.array([[0.3, 0.0, 0.0, 0.5], [0.0, 0.3, 0.3, 0.25]])
        self.assertTrue(np.allclose(filtered_dist, EXPECTED_FILTERED_DIST, atol=1e-3))
        # check edge cases with negative and extreme logits
        ramp_logits = np.broadcast_to(np.arange(vocab_size)[None, :], (batch_size, vocab_size)).copy() - (
            vocab_size // 2
        )
        # make ramp_logits more extreme
        ramp_logits[1] = ramp_logits[1] * 100.0
        # make sure at least 2 tokens are kept
        top_p_warp = FlaxTopPLogitsWarper(0.9, min_tokens_to_keep=2, filter_value=0.0)
        filtered_dist = top_p_warp(input_ids, ramp_logits, cur_len=None)
        # first batch should keep three tokens, second batch would keep only 1, but due to `min_tokens_to_keep=2` keeps 2.
        self.assertListEqual((filtered_dist != 0.0).sum(axis=-1).tolist(), [3, 2])
    def test_min_length_dist_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        min_dist_processor = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        # check that min length is applied at length 5
        input_ids = ids_tensor((batch_size, 20), vocab_size=20)
        cur_len = 5
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertListEqual(scores_before_min_length[:, eos_token_id].tolist(), 4 * [-float("inf")])
        # check that min length is not applied anymore at length 15
        scores = self._get_uniform_logits(batch_size, vocab_size)
        cur_len = 15
        scores_before_min_length = min_dist_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores_before_min_length).any())
    def test_forced_bos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        bos_token_id = 0
        logits_processor = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        # check that all scores are -inf except the bos_token_id score
        input_ids = ids_tensor((batch_size, 1), vocab_size=20)
        cur_len = 1
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, bos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, bos_token_id].tolist(), 4 * [0])  # score for bos_token_id should be zero
        # check that bos_token_id is not forced if current length is greater than 1
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_forced_eos_token_logits_processor(self):
        vocab_size = 20
        batch_size = 4
        eos_token_id = 0
        max_length = 5
        logits_processor = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        # check that all scores are -inf except the eos_token_id when max_length is reached
        input_ids = ids_tensor((batch_size, 4), vocab_size=20)
        cur_len = 4
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertTrue(jnp.isneginf(scores[:, eos_token_id + 1 :]).all())
        self.assertListEqual(scores[:, eos_token_id].tolist(), 4 * [0])  # score for eos_token_id should be zero
        # check that eos_token_id is not forced if max_length is not reached
        cur_len = 3
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores = logits_processor(input_ids, scores, cur_len=cur_len)
        self.assertFalse(jnp.isinf(scores).any())
    def test_processor_list(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
        scores = top_k_warp(input_ids, scores, cur_len=cur_len)
        scores = top_p_warp(input_ids, scores, cur_len=cur_len)
        scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
        scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
        # with processor list
        processor = FlaxLogitsProcessorList(
            [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
        )
        scores_comp = processor(input_ids, scores_comp, cur_len=cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
    def test_processor_list_jitted(self):
        batch_size = 4
        sequence_length = 10
        vocab_size = 15
        eos_token_id = 2
        bos_token_id = 1
        max_length = 15
        # dummy input_ids and scores
        input_ids = ids_tensor((batch_size, sequence_length), vocab_size)
        input_ids_comp = input_ids.copy()
        scores = self._get_uniform_logits(batch_size, vocab_size)
        scores_comp = scores.copy()
        # instantiate all dist processors
        temp_dist_warp = FlaxTemperatureLogitsWarper(temperature=0.5)
        top_k_warp = FlaxTopKLogitsWarper(3)
        top_p_warp = FlaxTopPLogitsWarper(0.8)
        # instantiate all logits processors
        min_dist_proc = FlaxMinLengthLogitsProcessor(min_length=10, eos_token_id=eos_token_id)
        bos_dist_proc = FlaxForcedBOSTokenLogitsProcessor(bos_token_id=bos_token_id)
        eos_dist_proc = FlaxForcedEOSTokenLogitsProcessor(max_length=max_length, eos_token_id=eos_token_id)
        cur_len = 10
        # no processor list
        def run_no_processor_list(input_ids, scores, cur_len):
            scores = temp_dist_warp(input_ids, scores, cur_len=cur_len)
            scores = top_k_warp(input_ids, scores, cur_len=cur_len)
            scores = top_p_warp(input_ids, scores, cur_len=cur_len)
            scores = min_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = bos_dist_proc(input_ids, scores, cur_len=cur_len)
            scores = eos_dist_proc(input_ids, scores, cur_len=cur_len)
            return scores
        # with processor list
        def run_processor_list(input_ids, scores, cur_len):
            processor = FlaxLogitsProcessorList(
                [temp_dist_warp, top_k_warp, top_p_warp, min_dist_proc, bos_dist_proc, eos_dist_proc]
            )
            scores = processor(input_ids, scores, cur_len=cur_len)
            return scores
        jitted_run_no_processor_list = jax.jit(run_no_processor_list)
        jitted_run_processor_list = jax.jit(run_processor_list)
        scores = jitted_run_no_processor_list(input_ids, scores, cur_len)
        scores_comp = jitted_run_processor_list(input_ids, scores_comp, cur_len)
        # scores should be equal
        self.assertTrue(jnp.allclose(scores, scores_comp, atol=1e-3))
        # input_ids should never be changed
        self.assertListEqual(input_ids.tolist(), input_ids_comp.tolist())
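# The same composition works outside the tests (illustrative sketch): bundle the warpers once,
# then apply them at each decoding step.
#   processors = FlaxLogitsProcessorList([FlaxTemperatureLogitsWarper(0.7), FlaxTopKLogitsWarper(50)])
#   scores = processors(input_ids, scores, cur_len=cur_len)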
| 718 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
    "configuration_blenderbot_small": [
        "BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "BlenderbotSmallConfig",
        "BlenderbotSmallOnnxConfig",
    ],
    "tokenization_blenderbot_small": ["BlenderbotSmallTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_blenderbot_small_fast"] = ["BlenderbotSmallTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_blenderbot_small"] = [
        "BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST",
        "BlenderbotSmallForCausalLM",
        "BlenderbotSmallForConditionalGeneration",
        "BlenderbotSmallModel",
        "BlenderbotSmallPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_blenderbot_small"] = [
        "TFBlenderbotSmallForConditionalGeneration",
        "TFBlenderbotSmallModel",
        "TFBlenderbotSmallPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_blenderbot_small"] = [
        "FlaxBlenderbotSmallForConditionalGeneration",
        "FlaxBlenderbotSmallModel",
        "FlaxBlenderbotSmallPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotSmallConfig,
BlenderbotSmallOnnxConfig,
)
from .tokenization_blenderbot_small import BlenderbotSmallTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_small_fast import BlenderbotSmallTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot_small import (
BLENDERBOT_SMALL_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotSmallForCausalLM,
BlenderbotSmallForConditionalGeneration,
BlenderbotSmallModel,
BlenderbotSmallPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot_small import (
TFBlenderbotSmallForConditionalGeneration,
TFBlenderbotSmallModel,
TFBlenderbotSmallPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot_small import (
FlaxBlenderbotSmallForConditionalGeneration,
FlaxBlenderbotSmallModel,
FlaxBlenderbotSmallPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 120 | 0 |
'''simple docstring'''
def equated_monthly_installments(principal: float, rate_per_annum: float, years_to_repay: int) -> float:
    """Return the fixed monthly payment for a loan, given an annual interest rate."""
    if principal <= 0:
        raise Exception("Principal borrowed must be > 0")
    if rate_per_annum < 0:
        raise Exception("Rate of interest must be >= 0")
    if years_to_repay <= 0 or not isinstance(years_to_repay, int):
        raise Exception("Years to repay must be an integer > 0")
    # Yearly rate is divided by 12 to get monthly rate
    rate_per_month = rate_per_annum / 12
    # Years to repay is multiplied by 12 to get number of payments as payment is monthly
    number_of_payments = years_to_repay * 12
    return (
        principal
        * rate_per_month
        * (1 + rate_per_month) ** number_of_payments
        / ((1 + rate_per_month) ** number_of_payments - 1)
    )
if __name__ == "__main__":
import doctest
doctest.testmod()
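# Worked example (values are illustrative): a 25_000 loan at 8% annual interest over 2 years
#   equated_monthly_installments(25_000, 0.08, 2)  # ≈ 1130.69 per month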
| 358 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=9,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=(2, 4),
            use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
            sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        # TODO: use tensor inputs instead of PIL, this is here just to leave the old expected_slices untouched
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            torch_dtype=torch.float16,
            safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id,
            safety_checker=None,
            scheduler=pndm,
            torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt,
            image=init_image,
            mask_image=mask_image,
            generator=generator,
            num_inference_steps=2,
            output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
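# Outside the harness, the slow checkpoint is driven the same way (illustrative sketch):
#   pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting")
#   result = pipe(prompt=prompt, image=init_image, mask_image=mask_image).images[0]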
| 358 | 1 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    PNDMScheduler,
    StableDiffusionLDM3DPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
A_ : int = StableDiffusionLDMaDPipeline
A_ : Any = TEXT_TO_IMAGE_PARAMS
A_ : str = TEXT_TO_IMAGE_BATCH_PARAMS
A_ : Dict = TEXT_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=6, out_channels=6, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
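
    # Note on the dummy VAE above: it uses 6 in/out channels instead of the
    # usual 3, mirroring LDM3D's joint RGB + depth latent space (an assumption
    # based on the pipeline's paired rgb/depth outputs).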
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_ldm3d_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        image_slice_rgb = rgb[0, -3:, -3:, -1]
        image_slice_depth = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262]
        )
        expected_slice_depth = np.array([103.46727, 85.812004, 87.849236])

        assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(image_slice_depth.flatten() - expected_slice_depth).max() < 1e-2
    def test_stable_diffusion_prompt_embeds(self):
        components = self.get_dummy_components()
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]

        # forward
        output = ldmad_pipe(**inputs)
        rgb_1, depth_1 = output.rgb, output.depth
        rgb_slice_1 = rgb_1[0, -3:, -3:, -1]
        depth_slice_1 = depth_1[0, -3:, -1]

        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]

        text_inputs = ldmad_pipe.tokenizer(
            prompt, padding="max_length", max_length=ldmad_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)

        prompt_embeds = ldmad_pipe.text_encoder(text_inputs)[0]
        inputs["prompt_embeds"] = prompt_embeds

        # forward
        output = ldmad_pipe(**inputs)
        rgb_2, depth_2 = output.rgb, output.depth
        rgb_slice_2 = rgb_2[0, -3:, -3:, -1]
        depth_slice_2 = depth_2[0, -3:, -1]

        assert np.abs(rgb_slice_1.flatten() - rgb_slice_2.flatten()).max() < 1e-4
        assert np.abs(depth_slice_1.flatten() - depth_slice_2.flatten()).max() < 1e-4
    def test_stable_diffusion_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        ldmad_pipe = StableDiffusionLDMaDPipeline(**components)
        ldmad_pipe = ldmad_pipe.to(device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        negative_prompt = "french fries"
        output = ldmad_pipe(**inputs, negative_prompt=negative_prompt)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1]
        depth_slice = depth[0, -3:, -1]

        assert rgb.shape == (1, 64, 64, 3)
        assert depth.shape == (1, 64, 64)

        expected_slice_rgb = np.array(
            [0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217]
        )
        expected_slice_depth = np.array([107.84738, 84.62802, 89.962135])

        assert np.abs(rgb_slice.flatten() - expected_slice_rgb).max() < 1e-2
        assert np.abs(depth_slice.flatten() - expected_slice_depth).max() < 1e-2
@slow
@require_torch_gpu
class StableDiffusionLDMaDPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d_stable_diffusion(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d")
        ldmad_pipe = ldmad_pipe.to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth
        rgb_slice = rgb[0, -3:, -3:, -1].flatten()
        depth_slice = depth[0, -3:, -1].flatten()

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512)

        expected_slice_rgb = np.array(
            [0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706]
        )
        expected_slice_depth = np.array(
            [0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706]
        )
        assert np.abs(rgb_slice - expected_slice_rgb).max() < 3e-3
        assert np.abs(depth_slice - expected_slice_depth).max() < 3e-3
@nightly
@require_torch_gpu
class StableDiffusionLDMaDPipelineNightlyTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 50,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_ldm3d(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.495586
        expected_rgb_std = 0.33795515
        expected_depth_mean = 112.48518
        expected_depth_std = 98.489746

        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
    def test_ldm3d_v2(self):
        ldmad_pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d-4c").to(torch_device)
        ldmad_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        output = ldmad_pipe(**inputs)
        rgb, depth = output.rgb, output.depth

        expected_rgb_mean = 0.4194127
        expected_rgb_std = 0.35375586
        expected_depth_mean = 0.5638502
        expected_depth_std = 0.34686103

        assert rgb.shape == (1, 512, 512, 3)
        assert depth.shape == (1, 512, 512, 1)
        assert np.abs(expected_rgb_mean - rgb.mean()) < 1e-3
        assert np.abs(expected_rgb_std - rgb.std()) < 1e-3
        assert np.abs(expected_depth_mean - depth.mean()) < 1e-3
        assert np.abs(expected_depth_std - depth.std()) < 1e-3
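
# A minimal usage sketch of the pipeline exercised above (assumes a CUDA device
# and network access to the "Intel/ldm3d" checkpoint; not part of the test suite):
#
#   pipe = StableDiffusionLDMaDPipeline.from_pretrained("Intel/ldm3d").to("cuda")
#   out = pipe("a photograph of an astronaut riding a horse", output_type="np")
#   rgb, depth = out.rgb, out.depth  # shapes (1, 512, 512, 3) and (1, 512, 512)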
| 717 | '''simple docstring'''
import unittest
import numpy as np
from diffusers import LMSDiscreteScheduler, OnnxStableDiffusionInpaintPipeline
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionInpaintPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    # FIXME: add fast tests
    pass
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionInpaintPipelineIntegrationTests(unittest.TestCase):
    @property
    def gpu_provider(self):
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000",  # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
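
    # With memory-pattern optimization disabled, ONNX Runtime allocates per run
    # instead of caching one large allocation pattern, which is intended to keep
    # peak GPU memory inside the 15GB arena configured in `gpu_provider`.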
    def test_inpainting(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=10, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.2514, 0.3007, 0.3517, 0.1790, 0.2382, 0.3167, 0.1944, 0.2273, 0.2464])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
    def test_inpainting_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/in_paint/overture-creations-5sI6fQgYIuo_mask.png"
        )
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-inpainting", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
            "runwayml/stable-diffusion-inpainting", revision="onnx", scheduler=lms_scheduler, safety_checker=None, feature_extractor=None, provider=self.gpu_provider, sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)

        prompt = "A red cat sitting on a park bench"

        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, guidance_scale=7.5, num_inference_steps=20, generator=generator, output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 255:258, -1]

        assert images.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.0086, 0.0077, 0.0083, 0.0093, 0.0107, 0.0139, 0.0094, 0.0097, 0.0125])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
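
# The same pipeline can run without a GPU by swapping the execution provider
# (sketch, not part of the tests above):
#
#   pipe = OnnxStableDiffusionInpaintPipeline.from_pretrained(
#       "runwayml/stable-diffusion-inpainting", revision="onnx",
#       provider="CPUExecutionProvider",
#   )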
| 320 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(prev_guess)
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
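
# For a root of multiplicity m, plain Newton iteration converges only linearly;
# passing `multiplicity=m` restores quadratic convergence. Sketch (illustrative
# values, double root of (x - 2)**2 at x = 2):
#
#   newton_raphson("(x - 2)**2", 3, multiplicity=2)  # ~2.0 in a few iterations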
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
"The root of log(y) - 1 = 0 is ",
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"The root of exp(x) - 1 = 0 is",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
    print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 45 |
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []
    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
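
    # Together these hooks record the exact event stream the Trainer emits; the
    # tests below replay that stream against get_expected_events, which
    # reconstructs the expected interleaving from the logging/eval/save strategies.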
@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )
    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)
    def get_expected_events(self, trainer):
        expected_events = ["on_init_end", "on_train_begin"]
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ["on_prediction_step"] * len(trainer.get_eval_dataloader()) + ["on_log", "on_evaluate"]
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('on_epoch_begin' )
for _ in range(UpperCamelCase ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('on_log' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('on_save' )
expected_events.append('on_epoch_end' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(cb)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, cb1)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)
    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action="ignore", category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy="steps")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy="epoch")
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy="steps",
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch("transformers.trainer_callback.logger.warning") as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
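
# Minimal sketch of a user-defined callback in the same style (hypothetical
# early-stopping rule, not part of the tests above):
#
#   class StopAfterTenSteps(TrainerCallback):
#       def on_step_end(self, args, state, control, **kwargs):
#           if state.global_step >= 10:
#               control.should_training_stop = True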
| 411 | 0 |
import unittest
from transformers import EsmConfig, is_torch_available
from transformers.testing_utils import TestCasePlus, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import EsmForMaskedLM, EsmForSequenceClassification, EsmForTokenClassification, EsmModel
from transformers.models.esm.modeling_esm import (
ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
EsmEmbeddings,
create_position_ids_from_input_ids,
)
class EsmModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=False,
        use_input_mask=True,
        use_token_type_ids=False,
        use_labels=True,
        vocab_size=33,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = self.get_config()

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        return EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, pad_token_id=1, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
        self.parent.assertEqual(result.pooler_output.shape, (self.batch_size, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = EsmForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_token_classification(self, config, input_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = EsmForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class EsmModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    test_mismatched_shapes = False

    all_model_classes = (
        (
            EsmForMaskedLM,
            EsmModel,
            EsmForSequenceClassification,
            EsmForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = ()
    pipeline_model_mapping = (
        {
            "feature-extraction": EsmModel,
            "fill-mask": EsmForMaskedLM,
            "text-classification": EsmForSequenceClassification,
            "token-classification": EsmForTokenClassification,
            "zero-shot": EsmForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    test_sequence_classification_problem_types = True

    def setUp(self):
        self.model_tester = EsmModelTester(self)
        self.config_tester = ConfigTester(self, config_class=EsmConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_model_various_embeddings(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = EsmModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_create_position_ids_respects_padding_index(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        model = EsmEmbeddings(config=config)

        input_ids = torch.as_tensor([[12, 31, 13, model.padding_idx]])
        expected_positions = torch.as_tensor(
            [
                [
                    0 + model.padding_idx + 1,
                    1 + model.padding_idx + 1,
                    2 + model.padding_idx + 1,
                    model.padding_idx,
                ]
            ]
        )
        position_ids = create_position_ids_from_input_ids(input_ids, model.padding_idx)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))

    def test_create_position_ids_from_inputs_embeds(self):
        config = self.model_tester.prepare_config_and_inputs()[0]
        embeddings = EsmEmbeddings(config=config)

        inputs_embeds = torch.empty(2, 4, 30)
        expected_single_positions = [
            0 + embeddings.padding_idx + 1,
            1 + embeddings.padding_idx + 1,
            2 + embeddings.padding_idx + 1,
            3 + embeddings.padding_idx + 1,
        ]
        expected_positions = torch.as_tensor([expected_single_positions, expected_single_positions])
        position_ids = embeddings.create_position_ids_from_inputs_embeds(inputs_embeds)
        self.assertEqual(position_ids.shape, expected_positions.shape)
        self.assertTrue(torch.all(torch.eq(position_ids, expected_positions)))
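
    # Worked example of the padding-aware scheme checked above, with padding_idx=1:
    #   input_ids    [12, 31, 13, 1]  ->  position_ids [2, 3, 4, 1]
    # Real tokens count up from padding_idx + 1, and pad positions collapse to
    # padding_idx itself.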
    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_embeddings_untied(self):
        pass

    @unittest.skip("Esm does not support embedding resizing")
    def test_resize_tokens_embeddings(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass
@require_torch
class EsmModelIntegrationTest(TestCasePlus):
    @slow
    def test_inference_masked_lm(self):
        with torch.no_grad():
            model = EsmForMaskedLM.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 1, 2, 3, 4, 5]])
            output = model(input_ids)[0]

            vocab_size = 33
            expected_shape = torch.Size((1, 6, vocab_size))
            self.assertEqual(output.shape, expected_shape)

            expected_slice = torch.tensor(
                [[[8.9215, -10.5898, -6.4671], [-6.3967, -13.9114, -1.1212], [-7.7812, -13.9516, -3.7406]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self):
        with torch.no_grad():
            model = EsmModel.from_pretrained("facebook/esm2_t6_8M_UR50D")
            model.eval()
            input_ids = torch.tensor([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
            output = model(input_ids)[0]
            # compare the actual values for a slice.
            expected_slice = torch.tensor(
                [[[0.1444, 0.5413, 0.3248], [0.3034, 0.0053, 0.3108], [0.3228, -0.2499, 0.3415]]]
            )
            self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class DocumentQuestionAnsweringTool(PipelineTool):
    default_checkpoint = "naver-clova-ix/donut-base-finetuned-docvqa"
    description = (
        "This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
        "should be the document containing the information, as well as a `question` that is the question about the "
        "document. It returns a text that contains the answer to the question."
    )
    name = "document_qa"
    pre_processor_class = AutoProcessor
    model_class = VisionEncoderDecoderModel

    inputs = ["image", "text"]
    outputs = ["text"]

    def __init__(self, *args, **kwargs):
        if not is_vision_available():
            raise ValueError("Pillow must be installed to use the DocumentQuestionAnsweringTool.")

        super().__init__(*args, **kwargs)
    def encode(self, document: "Image", question: str):
        task_prompt = "<s_docvqa><s_question>{user_input}</s_question><s_answer>"
        prompt = task_prompt.replace("{user_input}", question)
        decoder_input_ids = self.pre_processor.tokenizer(
            prompt, add_special_tokens=False, return_tensors="pt"
        ).input_ids
        pixel_values = self.pre_processor(document, return_tensors="pt").pixel_values

        return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}

    def forward(self, inputs):
        return self.model.generate(
            inputs["pixel_values"].to(self.device), decoder_input_ids=inputs["decoder_input_ids"].to(self.device), max_length=self.model.decoder.config.max_position_embeddings, early_stopping=True, pad_token_id=self.pre_processor.tokenizer.pad_token_id, eos_token_id=self.pre_processor.tokenizer.eos_token_id, use_cache=True, num_beams=1, bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]], return_dict_in_generate=True,
        ).sequences

    def decode(self, outputs):
        sequence = self.pre_processor.batch_decode(outputs)[0]
        sequence = sequence.replace(self.pre_processor.tokenizer.eos_token, "")
        sequence = sequence.replace(self.pre_processor.tokenizer.pad_token, "")
        sequence = re.sub(r"<.*?>", "", sequence, count=1).strip()  # remove first task start token
        sequence = self.pre_processor.token2json(sequence)

        return sequence["answer"]
| 230 | 0 |
"""simple docstring"""
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split a scikit-learn Bunch into (features, targets)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset, train an XGBoost classifier, and plot its
    # confusion matrix on a held-out test split.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
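    # normalize="true" row-normalizes the confusion matrix, so each cell reads
    # as the fraction of samples of a true class predicted as each class.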
plt.title("""Normalized Confusion Matrix - IRIS Dataset""" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 88 |
"""simple docstring"""
import qiskit
def single_qubit_measure(qubits: int, classical_bits: int):
    # Use Aer's simulator backend
    simulator = qiskit.Aer.get_backend("aer_simulator")

    # Create a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)

    # Apply X (NOT) Gate to Qubits 0 & 1
    circuit.x(0)
    circuit.x(1)

    # Map the quantum measurement to the classical bits
    circuit.measure([0, 1], [0, 1])

    # Execute the circuit on the qasm simulator
    job = qiskit.execute(circuit, simulator, shots=1000)

    # Return the histogram data of the results of the experiment.
    return job.result().get_counts(circuit)
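
# With both qubits flipped by X and no entangling gate, every shot should
# collapse to "11", so the ideal histogram is {"11": 1000}.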
if __name__ == "__main__":
    counts = single_qubit_measure(2, 2)
    print(F"""Total count for various states are: {counts}""")
| 553 | 0 |
from __future__ import annotations


def z_function(input_str: str) -> list[int]:
    z_result = [0 for i in range(len(input_str))]

    # initialize interval's left pointer and right pointer
    left_pointer, right_pointer = 0, 0

    for i in range(1, len(input_str)):
        # case when current index is inside the interval
        if i <= right_pointer:
            min_edge = min(right_pointer - i + 1, z_result[i - left_pointer])
            z_result[i] = min_edge

        while go_next(i, z_result, input_str):
            z_result[i] += 1

        # if new index's result gives us more right interval,
        # we've to update left_pointer and right_pointer
        if i + z_result[i] - 1 > right_pointer:
            left_pointer, right_pointer = i, i + z_result[i] - 1

    return z_result


def go_next(i: int, z_result: list[int], s: str) -> bool:
    return i + z_result[i] < len(s) and s[z_result[i]] == s[i + z_result[i]]


def find_pattern(pattern: str, input_str: str) -> int:
    answer = 0
    # concatenate 'pattern' and 'input_str' and call z_function
    # with concatenated string
    z_result = z_function(pattern + input_str)

    for val in z_result:
        # if value is greater than the length of the pattern string,
        # this index is the starting position of a substring
        # equal to the pattern string
        if val >= len(pattern):
            answer += 1

    return answer


if __name__ == "__main__":
    import doctest

    doctest.testmod()
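
# Worked example: z_function("abab") == [0, 0, 2, 0]. For find_pattern("ab", "abab"),
# the concatenation "ababab" yields z-values [0, 0, 4, 0, 2, 0]; two of them are
# >= len("ab"), so the pattern occurs twice.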
| 642 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
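
        # In short: Funnel pools the sequence between encoder blocks, and only
        # the full model upsamples back to the input length through its decoder,
        # which is why the base and full testers expect different hidden-state counts.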
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size, block_sizes=self.block_sizes, num_decoder_layers=self.num_decoder_layers, d_model=self.d_model, n_head=self.n_head, d_head=self.d_head, d_inner=self.d_inner, hidden_act=self.hidden_act, hidden_dropout=self.hidden_dropout, attention_dropout=self.attention_dropout, activation_dropout=self.activation_dropout, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 642 | 1 |
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_autoformer": [
        "AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "AutoformerConfig",
    ],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_autoformer'''] = [
        '''AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
        '''AutoformerForPrediction''',
        '''AutoformerModel''',
        '''AutoformerPreTrainedModel''',
    ]
if TYPE_CHECKING:
from .configuration_autoformer import (
AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
AutoformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_autoformer import (
AUTOFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
AutoformerForPrediction,
AutoformerModel,
AutoformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
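# A minimal, hypothetical sketch of the lazy-import trick used above (the name _LazyProxy
# is illustrative, not part of transformers): the module object is swapped for a proxy
# that imports a submodule only on first attribute access.
#
#   import importlib, types
#
#   class _LazyProxy(types.ModuleType):
#       def __init__(self, name, import_structure):
#           super().__init__(name)
#           self._import_structure = import_structure
#
#       def __getattr__(self, attr):
#           for submodule, names in self._import_structure.items():
#               if attr in names:
#                   module = importlib.import_module(f"{self.__name__}.{submodule}")
#                   return getattr(module, attr)
#           raise AttributeError(attr)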
| 214 |
from __future__ import annotations
import os
from typing import Any
import requests
BASE_URL = '''https://api.github.com'''
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
AUTHENTICATED_USER_ENDPOINT = BASE_URL + '''/user'''
# https://github.com/settings/tokens
USER_TOKEN = os.environ.get('''USER_TOKEN''', '''''')
def fetch_github_info( auth_token: str ) -> dict[Any, Any]:
    headers = {
        """Authorization""": F'''token {auth_token}''',
        """Accept""": """application/vnd.github.v3+json""",
    }
    return requests.get(AUTHENTICATED_USER_ENDPOINT , headers=headers ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f"""{key}: {value}""")
else:
raise ValueError('''\'USER_TOKEN\' field cannot be empty.''')
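# Example run (requires a real personal access token; both the token value and the script
# name below are placeholders):
#
#   USER_TOKEN=ghp_xxxxxxxx python fetch_github_info.py
#
# This prints one "key: value" line per field of the authenticated user returned by the API.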
| 214 | 1 |
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class AlbertModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , embedding_size=16 , hidden_size=36 , num_hidden_layers=6 , num_hidden_groups=6 , num_attention_heads=6 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.embedding_size = embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_hidden_groups = num_hidden_groups
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )

        config = self.get_config()

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config( self ):
        return AlbertConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )

    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )

    def create_and_check_for_pretraining( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForPreTraining(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels , sentence_order_label=sequence_labels , )
        self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )

    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = AlbertForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )

    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )

    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = AlbertForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )

    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = AlbertForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )

    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class AlbertModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
        (
            AlbertModel,
            AlbertForPreTraining,
            AlbertForMaskedLM,
            AlbertForMultipleChoice,
            AlbertForSequenceClassification,
            AlbertForTokenClassification,
            AlbertForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            '''feature-extraction''': AlbertModel,
            '''fill-mask''': AlbertForMaskedLM,
            '''question-answering''': AlbertForQuestionAnswering,
            '''text-classification''': AlbertForSequenceClassification,
            '''token-classification''': AlbertForTokenClassification,
            '''zero-shot''': AlbertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    fp16 = True
    def _prepare_for_class( self , inputs_dict , model_class , return_labels=False ):
        inputs_dict = super()._prepare_for_class(inputs_dict , model_class , return_labels=return_labels )

        if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict["labels"] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict["sentence_order_label"] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
        return inputs_dict

    def setUp( self ):
        self.model_tester = AlbertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AlbertConfig , hidden_size=37 )

    def test_config( self ):
        self.config_tester.run_common_tests()

    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )

    def test_for_pretraining( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs )

    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )

    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )

    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )

    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )

    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )

    @slow
    def test_model_from_pretrained( self ):
        for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = AlbertModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
class AlbertModelIntegrationTest( unittest.TestCase ):
    @slow
    def test_inference_no_head( self ):
        model = AlbertModel.from_pretrained("albert-base-v2" )
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        expected_shape = torch.Size((1, 11, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )

        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) )
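# The slice-and-allclose check above is the usual way to pin a model's numerics: compare a
# small fixed window of the hidden states against reference values captured from a
# known-good run, with a loose tolerance (atol=1e-4) to absorb hardware/backend noise.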
| 719 |
import argparse
import torch
from transformers import YosoConfig, YosoForMaskedLM
def rename_key( orig_key ):
    if "model" in orig_key:
        orig_key = orig_key.replace("model." , "" )
    if "norm1" in orig_key:
        orig_key = orig_key.replace("norm1" , "attention.output.LayerNorm" )
    if "norm2" in orig_key:
        orig_key = orig_key.replace("norm2" , "output.LayerNorm" )
    if "norm" in orig_key:
        orig_key = orig_key.replace("norm" , "LayerNorm" )
    if "transformer" in orig_key:
        layer_num = orig_key.split("." )[0].split("_" )[-1]
        orig_key = orig_key.replace(f"""transformer_{layer_num}""" , f"""encoder.layer.{layer_num}""" )
    if "mha.attn" in orig_key:
        orig_key = orig_key.replace("mha.attn" , "attention.self" )
    if "mha" in orig_key:
        orig_key = orig_key.replace("mha" , "attention" )
    if "W_q" in orig_key:
        orig_key = orig_key.replace("W_q" , "self.query" )
    if "W_k" in orig_key:
        orig_key = orig_key.replace("W_k" , "self.key" )
    if "W_v" in orig_key:
        orig_key = orig_key.replace("W_v" , "self.value" )
    if "ff1" in orig_key:
        orig_key = orig_key.replace("ff1" , "intermediate.dense" )
    if "ff2" in orig_key:
        orig_key = orig_key.replace("ff2" , "output.dense" )
    if "ff" in orig_key:
        orig_key = orig_key.replace("ff" , "output.dense" )
    if "mlm_class" in orig_key:
        orig_key = orig_key.replace("mlm.mlm_class" , "cls.predictions.decoder" )
    if "mlm" in orig_key:
        orig_key = orig_key.replace("mlm" , "cls.predictions.transform" )
    if "cls" not in orig_key:
        orig_key = "yoso." + orig_key

    return orig_key
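# For instance (hypothetical checkpoint key, assuming the fairseq-style layout above):
#
#   rename_key("model.transformer_0.mha.W_q.weight")
#   -> "yoso.encoder.layer.0.attention.self.query.weight"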
def convert_checkpoint_helper( max_position_embeddings , orig_state_dict ):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key )
        if ("pooler" in key) or ("sen_class" in key):
            continue
        else:
            orig_state_dict[rename_key(key )] = val

    orig_state_dict["cls.predictions.bias"] = orig_state_dict["cls.predictions.decoder.bias"]
    orig_state_dict["yoso.embeddings.position_ids"] = torch.arange(max_position_embeddings ).expand((1, -1) ) + 2

    return orig_state_dict
def convert_yoso_checkpoint( checkpoint_path , yoso_config_file , pytorch_dump_path ):
    orig_state_dict = torch.load(checkpoint_path , map_location="cpu" )["model_state_dict"]
    config = YosoConfig.from_json_file(yoso_config_file )
    model = YosoForMaskedLM(config )

    new_state_dict = convert_checkpoint_helper(config.max_position_embeddings , orig_state_dict )

    print(model.load_state_dict(new_state_dict ) )
    model.eval()
    model.save_pretrained(pytorch_dump_path )

    print(f"""Checkpoint successfully converted. Model saved at {pytorch_dump_path}""" )
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--pytorch_model_path", default=None, type=str, required=True, help="Path to YOSO pytorch checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The json file for YOSO model config.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
snake_case = parser.parse_args()
convert_yoso_checkpoint(args.pytorch_model_path, args.config_file, args.pytorch_dump_path)
| 587 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {
's-JoL/Open-Llama-V1': 'https://huggingface.co/s-JoL/Open-Llama-V1/blob/main/config.json',
}
class OpenLlamaConfig( PretrainedConfig ):
    model_type = 'open-llama'
def __init__( self , A=10_0000 , A=4096 , A=1_1008 , A=32 , A=32 , A="silu" , A=2048 , A=0.0_2 , A=1E-6 , A=True , A=0 , A=1 , A=2 , A=False , A=True , A=0.1 , A=0.1 , A=True , A=True , A=None , **A , ):
_lowerCamelCase : Optional[int] = vocab_size
_lowerCamelCase : List[str] = max_position_embeddings
_lowerCamelCase : List[str] = hidden_size
_lowerCamelCase : Dict = intermediate_size
_lowerCamelCase : Dict = num_hidden_layers
_lowerCamelCase : Union[str, Any] = num_attention_heads
_lowerCamelCase : Optional[int] = hidden_act
_lowerCamelCase : Optional[int] = initializer_range
_lowerCamelCase : Union[str, Any] = rms_norm_eps
_lowerCamelCase : Union[str, Any] = use_cache
_lowerCamelCase : Dict = kwargs.pop(
'use_memorry_efficient_attention' , A )
_lowerCamelCase : int = hidden_dropout_prob
_lowerCamelCase : Tuple = attention_dropout_prob
_lowerCamelCase : Tuple = use_stable_embedding
_lowerCamelCase : List[Any] = shared_input_output_embedding
_lowerCamelCase : Optional[int] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=A , bos_token_id=A , eos_token_id=A , tie_word_embeddings=A , **A , )
def _lowerCAmelCase ( self ):
if self.rope_scaling is None:
return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with two fields, `type` and `factor`, '
                F"got {self.rope_scaling}" )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                F"`rope_scaling`'s type field must be one of ['linear', 'dynamic'], got {rope_scaling_type}" )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(F"`rope_scaling`'s factor field must be a float > 1, got {rope_scaling_factor}" )
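# A value that passes the validation above (illustrative):
#
#   config.rope_scaling = {"type": "dynamic", "factor": 2.0}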
| 437 |
import pickle
import unittest
import torch
from accelerate import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils import require_cpu
@require_cpu
class _a ( unittest.TestCase ):
"""simple docstring"""
    def test_accelerated_optimizer_pickling( self ):
        model = torch.nn.Linear(10 , 10 )
        optimizer = torch.optim.SGD(model.parameters() , 0.1 )
        accelerator = Accelerator()
        optimizer = accelerator.prepare(optimizer )
        try:
            pickle.loads(pickle.dumps(optimizer ) )
        except Exception as e:
            self.fail(f'''Accelerated optimizer pickling failed with {e}''' )
        AcceleratorState._reset_state()
| 86 | 0 |
"""simple docstring"""
import itertools
from dataclasses import dataclass
from typing import Optional
import pandas as pd
import pyarrow as pa
import datasets
from datasets.table import table_cast
@dataclass
class PandasConfig(datasets.BuilderConfig):
    features: Optional[datasets.Features] = None


class Pandas(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = PandasConfig

    def _info( self ):
        return datasets.DatasetInfo(features=self.config.features )

    def _split_generators( self , dl_manager ):
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}" )
        data_files = dl_manager.download_and_extract(self.config.data_files )
        if isinstance(data_files , (str, list, tuple) ):
            files = data_files
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={'''files''': files} )]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files , str ):
                files = [files]
            # Use `dl_manager.iter_files` to skip hidden files in an extracted archive
            files = [dl_manager.iter_files(file ) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name , gen_kwargs={'''files''': files} ) )
        return splits

    def _cast_table( self , pa_table ):
        if self.config.features is not None:
            # more expensive cast to support nested features with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table , self.config.features.arrow_schema )
        return pa_table

    def _generate_tables( self , files ):
        for i, file in enumerate(itertools.chain.from_iterable(files ) ):
            with open(file , '''rb''' ) as f:
                pa_table = pa.Table.from_pandas(pd.read_pickle(f ) )
            yield i, self._cast_table(pa_table )
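# Usage sketch: as the packaged "pandas" builder of the datasets library, this allows
# loading pickled DataFrames directly, e.g.
#
#   ds = datasets.load_dataset("pandas", data_files={"train": "frames.pkl"})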
| 716 |
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
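# MAPPING translates fairseq parameter names (keys) to HF module paths (values); a "*"
# is later filled in with the encoder layer index. TOP_LEVEL_KEYS lists heads that live
# outside the `unispeech_sat.` prefix on the HF side.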
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )

    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}" )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                    continue
        if not is_used:
            unused_weights.append(name )

    logger.warning(f"Unused weights: {unused_weights}" )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}." )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found." )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowercase__ : List[Any] = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
lowercase__ : Optional[int] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
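# Typical invocation (the script name and all paths are placeholders):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --pytorch_dump_folder_path ./unispeech-sat-hf \
#       --not_finetuned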
| 485 | 0 |
'''simple docstring'''
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0_0
RESULTS_BASEPATH , RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, 'results', RESULTS_FILENAME.replace('.py', '.json'))
@get_duration
def map( dataset , **kwargs ):
    _ = dataset.map(**kwargs )
@get_duration
def filter( dataset , **kwargs ):
    _ = dataset.filter(**kwargs )
def benchmark_map_filter():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        features = datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES )
        tokenizer = transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=True )

        def tokenize(examples ):
            return tokenizer(examples["""text"""] )

        times["""map identity"""] = map(dataset )
        times["""map identity batched"""] = map(dataset , batched=True )
        times["""map no-op batched"""] = map(dataset , function=lambda x: None , batched=True )
        with dataset.formatted_as(type="""numpy""" ):
            times["""map no-op batched numpy"""] = map(dataset , function=lambda x: None , batched=True )
        with dataset.formatted_as(type="""pandas""" ):
            times["""map no-op batched pandas"""] = map(dataset , function=lambda x: None , batched=True )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            times["""map no-op batched pytorch"""] = map(dataset , function=lambda x: None , batched=True )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            times["""map no-op batched tensorflow"""] = map(dataset , function=lambda x: None , batched=True )
        times["""map fast-tokenizer batched"""] = map(dataset , function=tokenize , batched=True )
        times["""filter"""] = filter(dataset )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
    with open(RESULTS_FILE_PATH , """wb""" ) as f:
        f.write(json.dumps(times ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_map_filter()
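# The resulting JSON maps each benchmarked call to its wall-clock duration in seconds,
# e.g. (illustrative numbers only):
#
#   {"num examples": 500000, "map identity": 12.3, "map identity batched": 1.7, "filter": 0.9, ...}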
| 301 |
'''simple docstring'''
import argparse
import torch
from transformers import OpenAIGPTConfig, OpenAIGPTModel, load_tf_weights_in_openai_gpt
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
def convert_openai_checkpoint_to_pytorch( openai_checkpoint_folder_path , openai_config_file , pytorch_dump_folder_path ):
    # Construct model
    if openai_config_file == "":
        config = OpenAIGPTConfig()
    else:
        config = OpenAIGPTConfig.from_json_file(openai_config_file )
    model = OpenAIGPTModel(config )
    # Load weights from numpy
    load_tf_weights_in_openai_gpt(model , config , openai_checkpoint_folder_path )
    # Save pytorch-model
    pytorch_weights_dump_path = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
    pytorch_config_dump_path = pytorch_dump_folder_path + """/""" + CONFIG_NAME
    print(F"Save PyTorch model to {pytorch_weights_dump_path}" )
    torch.save(model.state_dict() , pytorch_weights_dump_path )
    print(F"Save configuration file to {pytorch_config_dump_path}" )
    with open(pytorch_config_dump_path , """w""" , encoding="""utf-8""" ) as f:
        f.write(config.to_json_string() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--openai_checkpoint_folder_path',
default=None,
type=str,
required=True,
help='Path to the TensorFlow checkpoint path.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
parser.add_argument(
'--openai_config_file',
default='',
type=str,
help=(
'An optional config json file corresponding to the pre-trained OpenAI model. \n'
'This specifies the model architecture.'
),
)
SCREAMING_SNAKE_CASE__ = parser.parse_args()
convert_openai_checkpoint_to_pytorch(
args.openai_checkpoint_folder_path, args.openai_config_file, args.pytorch_dump_folder_path
)
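# Typical invocation (the script name and paths are placeholders):
#
#   python convert_openai_checkpoint_to_pytorch.py \
#       --openai_checkpoint_folder_path ./openai-gpt-ckpt \
#       --pytorch_dump_folder_path ./openai-gpt-hf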
| 301 | 1 |
"""simple docstring"""
from __future__ import annotations
def check_polygon( nums: list[float] ) -> bool:
    if len(nums ) < 2:
        raise ValueError("Monogons and Digons are not polygons in the Euclidean space" )
    if any(i <= 0 for i in nums ):
        raise ValueError("All values must be greater than 0" )
    copy_nums = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
if __name__ == "__main__":
import doctest
doctest.testmod()
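# e.g. check_polygon([6, 10, 5]) -> True (10 < 6 + 5), while check_polygon([1, 1, 3]) -> False.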
| 518 |
"""simple docstring"""
import os
import jsonlines
import numpy as np
from tqdm import tqdm
DOC_STRIDE = 20_48
MAX_LENGTH = 40_96
SEED = 42
PROCESS_TRAIN = os.environ.pop("""PROCESS_TRAIN""", """false""")
CATEGORY_MAPPING = {"""null""": 0, """short""": 1, """long""": 2, """yes""": 3, """no""": 4}
def __magic_name__ ( __snake_case : Dict ) -> Optional[Any]:
def choose_first(__snake_case : Any , __snake_case : str=False ):
assert isinstance(__snake_case , __snake_case )
if len(__snake_case ) == 1:
lowercase : List[Any] = answer[0]
return {k: [answer[k]] for k in answer} if is_long_answer else answer
for a in answer:
if is_long_answer:
lowercase : Any = {k: [a[k]] for k in a}
if len(a["start_token"] ) > 0:
break
return a
lowercase : Any = {"id": example["id"]}
lowercase : List[str] = example["annotations"]
lowercase : Optional[int] = annotation["yes_no_answer"]
if 0 in yes_no_answer or 1 in yes_no_answer:
lowercase : Optional[int] = ["yes"] if 1 in yes_no_answer else ["no"]
lowercase : List[Any] = []
lowercase : Dict = []
lowercase : str = ["<cls>"]
else:
lowercase : int = ["short"]
lowercase : Optional[int] = choose_first(annotation["short_answers"] )
if len(out["start_token"] ) == 0:
# answer will be long if short is not available
lowercase : Dict = ["long"]
lowercase : Optional[int] = choose_first(annotation["long_answer"] , is_long_answer=__snake_case )
lowercase : int = []
answer.update(__snake_case )
# disregard some samples
if len(answer["start_token"] ) > 1 or answer["start_token"] == answer["end_token"]:
lowercase : str = True
else:
lowercase : List[str] = False
lowercase : Optional[Any] = ["start_token", "end_token", "start_byte", "end_byte", "text"]
if not all(isinstance(answer[k] , __snake_case ) for k in cols ):
raise ValueError("Issue in ID" , example["id"] )
return answer
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : Tuple=False ) -> Union[str, Any]:
lowercase : Tuple = _get_single_answer(__snake_case )
# bytes are of no use
del answer["start_byte"]
del answer["end_byte"]
# handle yes_no answers explicitly
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase : Any = example["document"]["tokens"]
lowercase : List[str] = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
return {
"context": " ".join(__snake_case ),
"answer": {
"start_token": -100, # ignore index in cross-entropy
"end_token": -100, # ignore index in cross-entropy
"category": answer["category"],
"span": answer["category"], # extra
},
}
# later, help in removing all no answers
if answer["start_token"] == [-1]:
return {
"context": "None",
"answer": {
"start_token": -1,
"end_token": -1,
"category": "null",
"span": "None", # extra
},
}
# handling normal samples
lowercase : List[Any] = ["start_token", "end_token"]
answer.update({k: answer[k][0] if len(answer[k] ) > 0 else answer[k] for k in cols} ) # e.g. [10] == 10
lowercase : Optional[int] = example["document"]["tokens"]
lowercase : Union[str, Any] = answer["start_token"]
lowercase : List[str] = answer["end_token"]
lowercase : int = []
for i in range(len(doc["token"] ) ):
if not doc["is_html"][i]:
context.append(doc["token"][i] )
else:
if answer["start_token"] > i:
start_token -= 1
if answer["end_token"] > i:
end_token -= 1
lowercase : Dict = " ".join(context[start_token:end_token] )
# checking above code
if assertion:
lowercase : List[str] = doc["is_html"][answer["start_token"] : answer["end_token"]]
lowercase : Any = doc["token"][answer["start_token"] : answer["end_token"]]
lowercase : Dict = " ".join([old[i] for i in range(len(__snake_case ) ) if not is_html[i]] )
if new != old:
print("ID:" , example["id"] )
print("New:" , __snake_case , end="\n" )
print("Old:" , __snake_case , end="\n\n" )
return {
"context": " ".join(__snake_case ),
"answer": {
"start_token": start_token,
"end_token": end_token - 1, # this makes it inclusive
"category": answer["category"], # either long or short
"span": new, # extra
},
}
def __magic_name__ ( __snake_case : Optional[int] , __snake_case : Optional[int] , __snake_case : int=2048 , __snake_case : Optional[Any]=4096 , __snake_case : int=True ) -> Tuple:
# overlap will be of doc_stride - q_len
lowercase : List[Any] = get_context_and_ans(__snake_case , assertion=__snake_case )
lowercase : List[Any] = out["answer"]
# later, removing these samples
if answer["start_token"] == -1:
return {
"example_id": example["id"],
"input_ids": [[-1]],
"labels": {
"start_token": [-1],
"end_token": [-1],
"category": ["null"],
},
}
lowercase : Tuple = tokenizer(example["question"]["text"] , out["context"] ).input_ids
lowercase : Optional[int] = input_ids.index(tokenizer.sep_token_id ) + 1
# return yes/no
if answer["category"][0] in ["yes", "no"]: # category is list with one element
lowercase : List[str] = []
lowercase : Optional[int] = []
lowercase : Any = input_ids[:q_len]
lowercase : List[Any] = range(__snake_case , len(__snake_case ) , max_length - doc_stride )
for i in doc_start_indices:
lowercase : List[Any] = i + max_length - q_len
lowercase : Dict = input_ids[i:end_index]
inputs.append(q_indices + slice )
category.append(answer["category"][0] )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": [-100] * len(__snake_case ),
"end_token": [-100] * len(__snake_case ),
"category": category,
},
}
lowercase : List[str] = out["context"].split()
lowercase : Tuple = splitted_context[answer["end_token"]]
lowercase : List[str] = len(
tokenizer(
" ".join(splitted_context[: answer["start_token"]] ) , add_special_tokens=__snake_case , ).input_ids )
lowercase : Tuple = len(
tokenizer(" ".join(splitted_context[: answer["end_token"]] ) , add_special_tokens=__snake_case ).input_ids )
answer["start_token"] += q_len
answer["end_token"] += q_len
# fixing end token
lowercase : List[str] = len(tokenizer(__snake_case , add_special_tokens=__snake_case ).input_ids )
if num_sub_tokens > 1:
answer["end_token"] += num_sub_tokens - 1
lowercase : Optional[Any] = input_ids[answer["start_token"] : answer["end_token"] + 1] # right & left are inclusive
lowercase : Tuple = answer["start_token"]
lowercase : Optional[Any] = answer["end_token"]
if assertion:
lowercase : str = tokenizer.decode(__snake_case )
if answer["span"] != new:
print("ISSUE IN TOKENIZATION" )
print("OLD:" , answer["span"] )
print("NEW:" , __snake_case , end="\n\n" )
if len(__snake_case ) <= max_length:
return {
"example_id": example["id"],
"input_ids": [input_ids],
"labels": {
"start_token": [answer["start_token"]],
"end_token": [answer["end_token"]],
"category": answer["category"],
},
}
lowercase : Dict = input_ids[:q_len]
lowercase : Any = range(__snake_case , len(__snake_case ) , max_length - doc_stride )
lowercase : List[str] = []
lowercase : Any = []
lowercase : Dict = []
lowercase : Tuple = [] # null, yes, no, long, short
for i in doc_start_indices:
lowercase : List[str] = i + max_length - q_len
lowercase : List[Any] = input_ids[i:end_index]
inputs.append(q_indices + slice )
assert len(inputs[-1] ) <= max_length, "Issue in truncating length"
if start_token >= i and end_token <= end_index - 1:
lowercase : List[Any] = start_token - i + q_len
lowercase : str = end_token - i + q_len
answers_category.append(answer["category"][0] ) # ["short"] -> "short"
else:
lowercase : List[Any] = -100
lowercase : Optional[int] = -100
answers_category.append("null" )
lowercase : Optional[Any] = inputs[-1][start_token : end_token + 1]
answers_start_token.append(__snake_case )
answers_end_token.append(__snake_case )
if assertion:
if new != old and new != [tokenizer.cls_token_id]:
print("ISSUE in strided for ID:" , example["id"] )
print("New:" , tokenizer.decode(__snake_case ) )
print("Old:" , tokenizer.decode(__snake_case ) , end="\n\n" )
if slice[-1] == tokenizer.sep_token_id:
break
return {
"example_id": example["id"],
"input_ids": inputs,
"labels": {
"start_token": answers_start_token,
"end_token": answers_end_token,
"category": answers_category,
},
}
def __magic_name__ ( __snake_case : str , __snake_case : str , __snake_case : Optional[Any]=2048 , __snake_case : Optional[int]=4096 , __snake_case : int=False ) -> List[str]:
lowercase : List[str] = get_strided_contexts_and_ans(
__snake_case , __snake_case , doc_stride=__snake_case , max_length=__snake_case , assertion=__snake_case , )
return example
def __magic_name__ ( __snake_case : Union[str, Any] , __snake_case : List[Any] ) -> Union[str, Any]:
with jsonlines.open(__snake_case , "a" ) as writer:
for example in tqdm(__snake_case , total=len(__snake_case ) , desc="Saving samples ... " ):
lowercase : List[str] = example["labels"]
for ids, start, end, cat in zip(
example["input_ids"] , labels["start_token"] , labels["end_token"] , labels["category"] , ):
if start == -1 and end == -1:
continue # leave waste samples with no answer
if cat == "null" and np.random.rand() < 0.6:
continue # removing 50 % samples
writer.write(
{
"input_ids": ids,
"start_token": start,
"end_token": end,
"category": CATEGORY_MAPPING[cat],
} )
if __name__ == "__main__":
from datasets import load_dataset
from transformers import BigBirdTokenizer
_A : Union[str, Any] = load_dataset("""natural_questions""")
_A : Union[str, Any] = BigBirdTokenizer.from_pretrained("""google/bigbird-roberta-base""")
_A : Dict = data["""train""" if PROCESS_TRAIN == """true""" else """validation"""]
_A : int = {
"""tokenizer""": tokenizer,
"""doc_stride""": DOC_STRIDE,
"""max_length""": MAX_LENGTH,
"""assertion""": False,
}
_A : List[str] = data.map(prepare_inputs, fn_kwargs=fn_kwargs)
_A : List[Any] = data.remove_columns(["""annotations""", """document""", """id""", """question"""])
print(data)
np.random.seed(SEED)
_A : str = """nq-training.jsonl""" if PROCESS_TRAIN == """true""" else """nq-validation.jsonl"""
save_to_disk(data, file_name=cache_file_name)
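# Each line of the emitted .jsonl file is one strided training window, e.g.
# (illustrative values):
#
#   {"input_ids": [...], "start_token": 112, "end_token": 118, "category": 1}
#
# where "category" is encoded with the CATEGORY_MAPPING defined at the top of the script.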
| 518 | 1 |
'''simple docstring'''
import math
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_a : Tuple = logging.get_logger(__name__)
_a : List[Any] = {
'facebook/data2vec-base-960h': 'https://huggingface.co/facebook/data2vec-audio-base-960h/resolve/main/config.json',
# See all Data2VecAudio models at https://huggingface.co/models?filter=data2vec-audio
}
class Data2VecAudioConfig( PretrainedConfig ):
    model_type = "data2vec-audio"
def __init__( self , a_=3_2 , a_=7_6_8 , a_=1_2 , a_=1_2 , a_=3_0_7_2 , a_="gelu" , a_=0.1 , a_=0.1 , a_=0.1 , a_=0.0 , a_=0.1 , a_=0.1 , a_=0.02 , a_=1E-5 , a_="gelu" , a_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , a_=(5, 2, 2, 2, 2, 2, 2) , a_=(1_0, 3, 3, 3, 3, 2, 2) , a_=False , a_=1_6 , a_=1_9 , a_=5 , a_=0.05 , a_=1_0 , a_=2 , a_=0.0 , a_=1_0 , a_=0 , a_="sum" , a_=False , a_=False , a_=2_5_6 , a_=(5_1_2, 5_1_2, 5_1_2, 5_1_2, 1_5_0_0) , a_=(5, 3, 3, 1, 1) , a_=(1, 2, 3, 1, 1) , a_=5_1_2 , a_=0 , a_=1 , a_=2 , a_=False , a_=3 , a_=2 , a_=3 , a_=None , **a_ , ) -> List[str]:
"""simple docstring"""
super().__init__(**a_ , pad_token_id=a_ , bos_token_id=a_ , eos_token_id=a_ )
UpperCAmelCase = hidden_size
UpperCAmelCase = feat_extract_activation
UpperCAmelCase = list(a_ )
UpperCAmelCase = list(a_ )
UpperCAmelCase = list(a_ )
UpperCAmelCase = conv_bias
UpperCAmelCase = num_conv_pos_embeddings
UpperCAmelCase = num_conv_pos_embedding_groups
UpperCAmelCase = conv_pos_kernel_size
UpperCAmelCase = len(self.conv_dim )
UpperCAmelCase = num_hidden_layers
UpperCAmelCase = intermediate_size
UpperCAmelCase = hidden_act
UpperCAmelCase = num_attention_heads
UpperCAmelCase = hidden_dropout
UpperCAmelCase = attention_dropout
UpperCAmelCase = activation_dropout
UpperCAmelCase = feat_proj_dropout
UpperCAmelCase = final_dropout
UpperCAmelCase = layerdrop
UpperCAmelCase = layer_norm_eps
UpperCAmelCase = initializer_range
UpperCAmelCase = vocab_size
UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F''' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'''
F''' `len(config.conv_kernel) = {len(self.conv_kernel )}`.''' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCAmelCase = mask_time_prob
UpperCAmelCase = mask_time_length
UpperCAmelCase = mask_time_min_masks
UpperCAmelCase = mask_feature_prob
UpperCAmelCase = mask_feature_length
UpperCAmelCase = mask_feature_min_masks
# ctc loss
UpperCAmelCase = ctc_loss_reduction
UpperCAmelCase = ctc_zero_infinity
# adapter
UpperCAmelCase = add_adapter
UpperCAmelCase = adapter_kernel_size
UpperCAmelCase = adapter_stride
UpperCAmelCase = num_adapter_layers
UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCAmelCase = list(a_ )
UpperCAmelCase = list(a_ )
UpperCAmelCase = list(a_ )
UpperCAmelCase = xvector_output_dim
@property
    def inputs_to_logits_ratio( self ):
"""simple docstring"""
return math.prod(self.conv_stride )
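# Hypothetical usage sketch (the model class name is assumed from the transformers API):
#
#   config = Data2VecAudioConfig(hidden_size=768, num_hidden_layers=12)
#   model = Data2VecAudioModel(config)
#
# With the default conv_stride (5, 2, 2, 2, 2, 2, 2), the property above yields
# 5 * 2**6 = 320 raw audio samples per output frame.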
| 447 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel( nn.Module ):
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2 )
            ] )
        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2 ):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states )
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states )
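# The blend above is a convex combination of the two transformers' residuals:
#
#   out = mix_ratio * (h1 - x) + (1 - mix_ratio) * (h2 - x) + x
#
# with mix_ratio = 0.5 by default, so both condition streams contribute equally.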
| 447 | 1 |
import re
import time
from typing import Optional
import IPython.display as disp
from ..trainer_callback import TrainerCallback
from ..trainer_utils import IntervalStrategy, has_length
def format_time( t ):
    t = int(t )
    h , m , s = t // 3_6_0_0, (t // 6_0) % 6_0, t % 6_0
    return F'''{h}:{m:02d}:{s:02d}''' if h != 0 else F'''{m:02d}:{s:02d}'''
def html_progress_bar( value , total , prefix , label , width=3_0_0 ):
    return F'''
    <div>
      {prefix}
      <progress value=\'{value}\' max=\'{total}\' style=\'width:{width}px; height:20px; vertical-align: middle;\'></progress>
      {label}
    </div>
    '''
def text_to_html_table( items ):
    html_code = '''<table border="1" class="dataframe">\n'''
html_code += """ <thead>\n <tr style="text-align: left;">\n"""
for i in items[0]:
html_code += F''' <th>{i}</th>\n'''
html_code += " </tr>\n </thead>\n <tbody>\n"
for line in items[1:]:
html_code += " <tr>\n"
for elt in line:
            elt = F'''{elt:.6f}''' if isinstance(elt , float ) else str(elt )
html_code += F''' <td>{elt}</td>\n'''
html_code += " </tr>\n"
html_code += " </tbody>\n</table><p>"
return html_code
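# e.g. format_time(3661) -> "1:01:01" and format_time(75) -> "01:15".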
class NotebookProgressBar:
    warmup = 5
    update_every = 0.2

    def __init__( self , total: int , prefix: Optional[str] = None , leave: bool = True , parent: Optional["NotebookTrainingTracker"] = None , width: int = 300 , ):
        self.total = total
        self.prefix = '''''' if prefix is None else prefix
        self.leave = leave
        self.parent = parent
        self.width = width
        self.last_value = None
        self.comment = None
        self.output = None
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase_ : int , lowerCamelCase_ : bool = False , lowerCamelCase_ : str = None ) -> Optional[Any]:
__magic_name__ : Tuple = value
if comment is not None:
__magic_name__ : Tuple = comment
if self.last_value is None:
__magic_name__ : Dict = time.time()
__magic_name__ : List[Any] = value
__magic_name__ : Tuple = None
__magic_name__ : List[str] = self.warmup
__magic_name__ : List[Any] = 1
self.update_bar(lowerCamelCase_ )
elif value <= self.last_value and not force_update:
return
elif force_update or self.first_calls > 0 or value >= min(self.last_value + self.wait_for , self.total ):
if self.first_calls > 0:
self.first_calls -= 1
__magic_name__ : List[Any] = time.time()
__magic_name__ : Optional[Any] = current_time - self.start_time
# We could have value = self.start_value if the update is called twixe with the same start value.
if value > self.start_value:
__magic_name__ : List[str] = self.elapsed_time / (value - self.start_value)
else:
__magic_name__ : Optional[Any] = None
if value >= self.total:
__magic_name__ : Optional[int] = self.total
__magic_name__ : Optional[Any] = None
if not self.leave:
self.close()
elif self.average_time_per_item is not None:
__magic_name__ : str = self.average_time_per_item * (self.total - value)
self.update_bar(lowerCamelCase_ )
__magic_name__ : Optional[int] = value
__magic_name__ : List[str] = current_time
if self.average_time_per_item is None:
__magic_name__ : Tuple = 1
else:
__magic_name__ : str = max(int(self.update_every / self.average_time_per_item ) , 1 )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : Any , lowerCamelCase_ : List[Any]=None ) -> str:
__magic_name__ : str = ''' ''' * (len(str(self.total ) ) - len(str(lowerCamelCase_ ) )) + str(lowerCamelCase_ )
if self.elapsed_time is None:
__magic_name__ : Optional[int] = F'''[{spaced_value}/{self.total} : < :'''
elif self.predicted_remaining is None:
__magic_name__ : Optional[Any] = F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )}'''
else:
__magic_name__ : Dict = (
F'''[{spaced_value}/{self.total} {format_time(self.elapsed_time )} <'''
F''' {format_time(self.predicted_remaining )}'''
)
self.label += F''', {1/self.average_time_per_item:.2f} it/s'''
self.label += "]" if self.comment is None or len(self.comment ) == 0 else F''', {self.comment}]'''
self.display()
def UpperCAmelCase__ ( self : List[str] ) -> str:
__magic_name__ : List[Any] = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.parent is not None:
# If this is a child bar, the parent will take care of the display.
self.parent.display()
return
if self.output is None:
__magic_name__ : List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
if self.parent is None and self.output is not None:
self.output.update(disp.HTML('''''' ) )
class NotebookTrainingTracker( NotebookProgressBar ):
'''simple docstring'''
def __init__( self : Optional[Any] , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : List[str]=None ) -> str:
super().__init__(lowerCamelCase_ )
__magic_name__ : int = None if column_names is None else [column_names]
__magic_name__ : str = None
def UpperCAmelCase__ ( self : int ) -> Any:
__magic_name__ : Any = html_progress_bar(self.value , self.total , self.prefix , self.label , self.width )
if self.inner_table is not None:
self.html_code += text_to_html_table(self.inner_table )
if self.child_bar is not None:
self.html_code += self.child_bar.html_code
if self.output is None:
__magic_name__ : List[Any] = disp.display(disp.HTML(self.html_code ) , display_id=lowerCamelCase_ )
else:
self.output.update(disp.HTML(self.html_code ) )
def UpperCAmelCase__ ( self : Union[str, Any] , lowerCamelCase_ : Dict ) -> Optional[int]:
if self.inner_table is None:
__magic_name__ : int = [list(values.keys() ), list(values.values() )]
else:
__magic_name__ : Any = self.inner_table[0]
if len(self.inner_table ) == 1:
# We give a chance to update the column names at the first iteration
for key in values.keys():
if key not in columns:
columns.append(lowerCamelCase_ )
__magic_name__ : Union[str, Any] = columns
self.inner_table.append([values[c] for c in columns] )
def UpperCAmelCase__ ( self : Optional[Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : str=None , lowerCamelCase_ : Any=300 ) -> Any:
__magic_name__ : Union[str, Any] = NotebookProgressBar(lowerCamelCase_ , prefix=lowerCamelCase_ , parent=self , width=lowerCamelCase_ )
return self.child_bar
def UpperCAmelCase__ ( self : List[str] ) -> Union[str, Any]:
__magic_name__ : str = None
self.display()
class NotebookProgressCallback( TrainerCallback ):
'''simple docstring'''
def __init__( self : Tuple ) -> Union[str, Any]:
__magic_name__ : int = None
__magic_name__ : Dict = None
__magic_name__ : List[str] = False
def UpperCAmelCase__ ( self : List[Any] , lowerCamelCase_ : Any , lowerCamelCase_ : str , lowerCamelCase_ : List[Any] , **lowerCamelCase_ : Dict ) -> int:
__magic_name__ : Any = '''Epoch''' if args.evaluation_strategy == IntervalStrategy.EPOCH else '''Step'''
__magic_name__ : Tuple = 0
__magic_name__ : Dict = 0
__magic_name__ : List[Any] = [self.first_column] + ['''Training Loss''']
if args.evaluation_strategy != IntervalStrategy.NO:
column_names.append('''Validation Loss''' )
__magic_name__ : Union[str, Any] = NotebookTrainingTracker(state.max_steps , lowerCamelCase_ )
def UpperCAmelCase__ ( self : Any , lowerCamelCase_ : Optional[int] , lowerCamelCase_ : int , lowerCamelCase_ : Any , **lowerCamelCase_ : List[Any] ) -> Any:
__magic_name__ : str = int(state.epoch ) if int(state.epoch ) == state.epoch else F'''{state.epoch:.2f}'''
self.training_tracker.update(
state.global_step + 1 , comment=F'''Epoch {epoch}/{state.num_train_epochs}''' , force_update=self._force_next_update , )
__magic_name__ : int = False
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Tuple=None , **lowerCamelCase_ : Dict ) -> Any:
if not has_length(lowerCamelCase_ ):
return
if self.prediction_bar is None:
if self.training_tracker is not None:
__magic_name__ : Tuple = self.training_tracker.add_child(len(lowerCamelCase_ ) )
else:
__magic_name__ : Tuple = NotebookProgressBar(len(lowerCamelCase_ ) )
self.prediction_bar.update(1 )
else:
self.prediction_bar.update(self.prediction_bar.value + 1 )
def UpperCAmelCase__ ( self : Optional[int] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : int , lowerCamelCase_ : str , **lowerCamelCase_ : Union[str, Any] ) -> List[str]:
if self.prediction_bar is not None:
self.prediction_bar.close()
__magic_name__ : Any = None
def UpperCAmelCase__ ( self : Tuple , lowerCamelCase_ : Union[str, Any] , lowerCamelCase_ : List[str] , lowerCamelCase_ : Tuple , lowerCamelCase_ : Optional[Any]=None , **lowerCamelCase_ : List[str] ) -> str:
# Only for when there is no evaluation
if args.evaluation_strategy == IntervalStrategy.NO and "loss" in logs:
__magic_name__ : Union[str, Any] = {'''Training Loss''': logs['''loss''']}
            # First column is necessarily Step since we're not in epoch eval strategy
__magic_name__ : str = state.global_step
self.training_tracker.write_line(lowerCamelCase_ )
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : List[Any] , lowerCamelCase_ : Dict , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[int]=None , **lowerCamelCase_ : Union[str, Any] ) -> Dict:
if self.training_tracker is not None:
__magic_name__ : List[str] = {'''Training Loss''': '''No log''', '''Validation Loss''': '''No log'''}
for log in reversed(state.log_history ):
if "loss" in log:
__magic_name__ : str = log['''loss''']
break
if self.first_column == "Epoch":
__magic_name__ : Optional[int] = int(state.epoch )
else:
__magic_name__ : Optional[Any] = state.global_step
__magic_name__ : int = '''eval'''
for k in metrics:
if k.endswith('''_loss''' ):
__magic_name__ : Tuple = re.sub(R'''\_loss$''' , '''''' , lowerCamelCase_ )
__magic_name__ : int = metrics.pop('''total_flos''' , lowerCamelCase_ )
__magic_name__ : Tuple = metrics.pop('''epoch''' , lowerCamelCase_ )
__magic_name__ : Optional[Any] = metrics.pop(F'''{metric_key_prefix}_runtime''' , lowerCamelCase_ )
__magic_name__ : List[Any] = metrics.pop(F'''{metric_key_prefix}_samples_per_second''' , lowerCamelCase_ )
__magic_name__ : Optional[int] = metrics.pop(F'''{metric_key_prefix}_steps_per_second''' , lowerCamelCase_ )
__magic_name__ : str = metrics.pop(F'''{metric_key_prefix}_jit_compilation_time''' , lowerCamelCase_ )
for k, v in metrics.items():
if k == F'''{metric_key_prefix}_loss''':
__magic_name__ : List[Any] = v
else:
__magic_name__ : Optional[Any] = k.split('''_''' )
__magic_name__ : List[str] = ''' '''.join([part.capitalize() for part in splits[1:]] )
__magic_name__ : Optional[int] = v
self.training_tracker.write_line(lowerCamelCase_ )
self.training_tracker.remove_child()
__magic_name__ : List[str] = None
# Evaluation takes a long time so we should force the next update.
__magic_name__ : str = True
def UpperCAmelCase__ ( self : List[str] , lowerCamelCase_ : Dict , lowerCamelCase_ : Optional[Any] , lowerCamelCase_ : Optional[int] , **lowerCamelCase_ : Optional[Any] ) -> Optional[Any]:
self.training_tracker.update(
state.global_step , comment=F'''Epoch {int(state.epoch )}/{state.num_train_epochs}''' , force_update=lowerCamelCase_ )
__magic_name__ : List[Any] = None
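# A minimal usage sketch of the progress bar on its own (it assumes the
# obfuscated classes above mirror transformers' notebook helpers, where
# NotebookProgressBar.update(value, force_update=False, comment=None)):
#
#     bar = NotebookProgressBar(100)            # total number of steps
#     for step in range(100):
#         bar.update(step + 1)                  # redraws are throttled by `update_every`
#     bar.update(100, force_update=True)        # force the final redraw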
| 715 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
'''stable diffusion controlnet''',
'''0.22.0''',
'''Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.''',
standard_warn=False,
stacklevel=3,
)
| 501 | 0 |
"""simple docstring"""
from bisect import bisect
from itertools import accumulate
def fractional_knapsack(value, weight, capacity, counter):
    # Greedy: sort items by value-to-weight ratio, take whole items while they
    # fit, then a fraction of the first item that does not.
    r = sorted(zip(value, weight), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, capacity)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (capacity - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != counter
        else sum(vl[:k])
    )
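# A worked example (hypothetical item set): values [60, 100, 120], weights
# [10, 20, 30], capacity 50, counter 3. Sorted by value/weight the order is
# unchanged; two whole items fit (total weight 30), then 20/30 of the last:
# fractional_knapsack([60, 100, 120], [10, 20, 30], 50, 3)
# -> 60 + 100 + (50 - 30) * 120 / 30 = 240.0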
if __name__ == "__main__":
import doctest
doctest.testmod()
| 102 |
'''simple docstring'''
# coding=utf-8
# Copyright 2020 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import sys
import transformers
__a = "3"
print("Python version:", sys.version)
print("transformers version:", transformers.__version__)
try:
import torch
print("Torch version:", torch.__version__)
print("Cuda available:", torch.cuda.is_available())
print("Cuda version:", torch.version.cuda)
print("CuDNN version:", torch.backends.cudnn.version())
print("Number of GPUs available:", torch.cuda.device_count())
print("NCCL version:", torch.cuda.nccl.version())
except ImportError:
print("Torch version:", None)
try:
import deepspeed
print("DeepSpeed version:", deepspeed.__version__)
except ImportError:
print("DeepSpeed version:", None)
try:
import tensorflow as tf
print("TensorFlow version:", tf.__version__)
print("TF GPUs available:", bool(tf.config.list_physical_devices("GPU")))
print("Number of TF GPUs available:", len(tf.config.list_physical_devices("GPU")))
except ImportError:
print("TensorFlow version:", None)
| 374 | 0 |
'''simple docstring'''
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    """Count, for each possible total, the number of ways `dice_number` dice
    with `sides_number` faces can roll it."""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)

    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1

    return totals_frequencies


def solution() -> float:
    """Probability that Peter (nine 4-sided dice) rolls a strictly higher total
    than Colin (six 6-sided dice), rounded to 7 decimal places (Project Euler 205)."""
    peter_totals_frequencies = total_frequency_distribution(sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(sides_number=6, dice_number=6)

    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        # Peter wins every pairing where Colin's total is strictly smaller.
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total]
        )

    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number

    return round(peter_win_probability, ndigits=7)
if __name__ == "__main__":
print(F'''{solution() = }''')
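# For the classic parameters above (Peter: nine 4-sided dice vs Colin: six
# 6-sided dice) the printed value is 0.5731441.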
| 706 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class ChineseCLIPProcessor( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ChineseCLIPImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")
    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead." , FutureWarning , )
            feature_extractor = kwargs.pop("feature_extractor" )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`." )
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`." )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
    def __call__( self , text=None , images=None , return_tensors=None , **kwargs ):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none." )
        if text is not None:
            encoding = self.tokenizer(text , return_tensors=return_tensors , **kwargs )
        if images is not None:
            image_features = self.image_processor(images , return_tensors=return_tensors , **kwargs )
        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ) , tensor_type=return_tensors )
    def batch_decode( self , *args , **kwargs ):
        return self.tokenizer.batch_decode(*args , **kwargs )
    def decode( self , *args , **kwargs ):
        return self.tokenizer.decode(*args , **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
    def feature_extractor_class( self ):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , FutureWarning , )
        return self.image_processor_class
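# A minimal usage sketch (checkpoint id and inputs are illustrative, not from
# the source):
#
#     from PIL import Image
#     processor = ChineseCLIPProcessor.from_pretrained("OFA-Sys/chinese-clip-vit-base-patch16")
#     inputs = processor(text=["一张猫的照片"], images=Image.open("cat.png"), return_tensors="pt")
#     # -> dict with input_ids / attention_mask / token_type_ids / pixel_values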
| 551 | 0 |
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
UpperCAmelCase_ = logging.get_logger(__name__) # pylint: disable=invalid-name
class SpeechToImagePipeline( DiffusionPipeline ):
"""simple docstring"""
def __init__( self : Optional[int] , __lowerCAmelCase : WhisperForConditionalGeneration , __lowerCAmelCase : WhisperProcessor , __lowerCAmelCase : AutoencoderKL , __lowerCAmelCase : CLIPTextModel , __lowerCAmelCase : CLIPTokenizer , __lowerCAmelCase : UNetaDConditionModel , __lowerCAmelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] , __lowerCAmelCase : StableDiffusionSafetyChecker , __lowerCAmelCase : CLIPImageProcessor , ) -> Dict:
super().__init__()
if safety_checker is None:
logger.warning(
f'''You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure'''
''' that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered'''
''' results in services or applications open to the public. Both the diffusers team and Hugging Face'''
''' strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling'''
''' it only for use-cases that involve analyzing network behavior or auditing its results. For more'''
''' information, please have a look at https://github.com/huggingface/diffusers/pull/254 .''' )
self.register_modules(
speech_model=__lowerCAmelCase , speech_processor=__lowerCAmelCase , vae=__lowerCAmelCase , text_encoder=__lowerCAmelCase , tokenizer=__lowerCAmelCase , unet=__lowerCAmelCase , scheduler=__lowerCAmelCase , feature_extractor=__lowerCAmelCase , )
    def enable_attention_slicing( self , slice_size: Optional[Union[str, int]] = "auto" ) -> None:
        if slice_size == "auto":
            # Half the attention head dim is a reasonable default slice.
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size )
    def disable_attention_slicing( self ) -> None:
        self.enable_attention_slicing(None )
@torch.no_grad()
def __call__( self : Tuple , __lowerCAmelCase : List[str] , __lowerCAmelCase : Any=1_60_00 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 5_12 , __lowerCAmelCase : int = 50 , __lowerCAmelCase : float = 7.5 , __lowerCAmelCase : Optional[Union[str, List[str]]] = None , __lowerCAmelCase : Optional[int] = 1 , __lowerCAmelCase : float = 0.0 , __lowerCAmelCase : Optional[torch.Generator] = None , __lowerCAmelCase : Optional[torch.FloatTensor] = None , __lowerCAmelCase : Optional[str] = "pil" , __lowerCAmelCase : bool = True , __lowerCAmelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __lowerCAmelCase : int = 1 , **__lowerCAmelCase : Optional[int] , ) -> Optional[Any]:
_A = self.speech_processor.feature_extractor(
__lowerCAmelCase , return_tensors='''pt''' , sampling_rate=__lowerCAmelCase ).input_features.to(self.device )
_A = self.speech_model.generate(__lowerCAmelCase , max_length=48_00_00 )
_A = self.speech_processor.tokenizer.batch_decode(__lowerCAmelCase , skip_special_tokens=__lowerCAmelCase , normalize=__lowerCAmelCase )[
0
]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = 1
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = len(__lowerCAmelCase )
else:
raise ValueError(f'''`prompt` has to be of type `str` or `list` but is {type(__lowerCAmelCase )}''' )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` have to be divisible by 8 but are {height} and {width}.''' )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(__lowerCAmelCase , __lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(__lowerCAmelCase )}.''' )
# get prompt text embeddings
_A = self.tokenizer(
__lowerCAmelCase , padding='''max_length''' , max_length=self.tokenizer.model_max_length , return_tensors='''pt''' , )
_A = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
_A = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
'''The following part of your input was truncated because CLIP can only handle sequences up to'''
f''' {self.tokenizer.model_max_length} tokens: {removed_text}''' )
_A = text_input_ids[:, : self.tokenizer.model_max_length]
_A = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
_A , _A , _A = text_embeddings.shape
_A = text_embeddings.repeat(1 , __lowerCAmelCase , 1 )
_A = text_embeddings.view(bs_embed * num_images_per_prompt , __lowerCAmelCase , -1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
_A = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
_A = 42
if negative_prompt is None:
_A = [''''''] * batch_size
elif type(__lowerCAmelCase ) is not type(__lowerCAmelCase ):
raise TypeError(
f'''`negative_prompt` should be the same type to `prompt`, but got {type(__lowerCAmelCase )} !='''
f''' {type(__lowerCAmelCase )}.''' )
elif isinstance(__lowerCAmelCase , __lowerCAmelCase ):
_A = [negative_prompt]
elif batch_size != len(__lowerCAmelCase ):
raise ValueError(
f'''`negative_prompt`: {negative_prompt} has batch size {len(__lowerCAmelCase )}, but `prompt`:'''
f''' {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches'''
''' the batch size of `prompt`.''' )
else:
_A = negative_prompt
_A = text_input_ids.shape[-1]
_A = self.tokenizer(
__lowerCAmelCase , padding='''max_length''' , max_length=__lowerCAmelCase , truncation=__lowerCAmelCase , return_tensors='''pt''' , )
_A = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
_A = uncond_embeddings.shape[1]
_A = uncond_embeddings.repeat(1 , __lowerCAmelCase , 1 )
_A = uncond_embeddings.view(batch_size * num_images_per_prompt , __lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
_A = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
_A = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
_A = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
_A = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device='''cpu''' , dtype=__lowerCAmelCase ).to(
self.device )
else:
_A = torch.randn(__lowerCAmelCase , generator=__lowerCAmelCase , device=self.device , dtype=__lowerCAmelCase )
else:
if latents.shape != latents_shape:
raise ValueError(f'''Unexpected latents shape, got {latents.shape}, expected {latents_shape}''' )
_A = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(__lowerCAmelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
_A = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
_A = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
_A = '''eta''' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
_A = {}
if accepts_eta:
_A = eta
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_A = self.scheduler.scale_model_input(__lowerCAmelCase , __lowerCAmelCase )
# predict the noise residual
_A = self.unet(__lowerCAmelCase , __lowerCAmelCase , encoder_hidden_states=__lowerCAmelCase ).sample
# perform guidance
if do_classifier_free_guidance:
_A , _A = noise_pred.chunk(2 )
_A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
_A = self.scheduler.step(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , **__lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase )
_A = 1 / 0.1_8215 * latents
_A = self.vae.decode(__lowerCAmelCase ).sample
_A = (image / 2 + 0.5).clamp(0 , 1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
_A = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
_A = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return image
return StableDiffusionPipelineOutput(images=__lowerCAmelCase , nsfw_content_detected=__lowerCAmelCase )
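# A minimal usage sketch (model ids and component wiring are illustrative,
# mirroring the diffusers community "speech_to_image_diffusion" example):
#
#     from transformers import WhisperForConditionalGeneration, WhisperProcessor
#     speech_model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-small")
#     speech_processor = WhisperProcessor.from_pretrained("openai/whisper-small")
#     pipe = DiffusionPipeline.from_pretrained(
#         "runwayml/stable-diffusion-v1-5",
#         custom_pipeline="speech_to_image_diffusion",
#         speech_model=speech_model,
#         speech_processor=speech_processor,
#     )
#     image = pipe(raw_audio_array, sampling_rate=16_000).images[0]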
| 2 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class FlaxCrossAttnDownBlockaD(nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
    def setup( self ) -> None:
a__ = []
a__ = []
for i in range(self.num_layers ):
a__ = self.in_channels if i == 0 else self.out_channels
a__ = FlaxResnetBlockaD(
in_channels=__snake_case ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(__snake_case )
a__ = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(__snake_case )
a__ = resnets
a__ = attentions
if self.add_downsample:
a__ = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self :Dict ,__snake_case :str ,__snake_case :Optional[Any] ,__snake_case :Optional[int] ,__snake_case :Tuple=True ) -> Tuple:
a__ = ()
for resnet, attn in zip(self.resnets ,self.attentions ):
a__ = resnet(__snake_case ,__snake_case ,deterministic=__snake_case )
a__ = attn(__snake_case ,__snake_case ,deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
a__ = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxDownBlockaD(nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
    def setup( self ) -> None:
a__ = []
for i in range(self.num_layers ):
a__ = self.in_channels if i == 0 else self.out_channels
a__ = FlaxResnetBlockaD(
in_channels=__snake_case ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(__snake_case )
a__ = resnets
if self.add_downsample:
a__ = FlaxDownsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self :Optional[Any] ,__snake_case :str ,__snake_case :Dict ,__snake_case :Any=True ) -> List[Any]:
a__ = ()
for resnet in self.resnets:
a__ = resnet(__snake_case ,__snake_case ,deterministic=__snake_case )
output_states += (hidden_states,)
if self.add_downsample:
a__ = self.downsamplers_a(__snake_case )
output_states += (hidden_states,)
return hidden_states, output_states
class FlaxCrossAttnUpBlockaD(nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
    def setup( self ) -> None:
a__ = []
a__ = []
for i in range(self.num_layers ):
a__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a__ = self.prev_output_channel if i == 0 else self.out_channels
a__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(__snake_case )
a__ = FlaxTransformeraDModel(
in_channels=self.out_channels ,n_heads=self.num_attention_heads ,d_head=self.out_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,only_cross_attention=self.only_cross_attention ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(__snake_case )
a__ = resnets
a__ = attentions
if self.add_upsample:
a__ = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self :List[str] ,__snake_case :int ,__snake_case :List[Any] ,__snake_case :Union[str, Any] ,__snake_case :Union[str, Any] ,__snake_case :Dict=True ) -> int:
for resnet, attn in zip(self.resnets ,self.attentions ):
# pop res hidden states
a__ = res_hidden_states_tuple[-1]
a__ = res_hidden_states_tuple[:-1]
a__ = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
a__ = resnet(__snake_case ,__snake_case ,deterministic=__snake_case )
a__ = attn(__snake_case ,__snake_case ,deterministic=__snake_case )
if self.add_upsample:
a__ = self.upsamplers_a(__snake_case )
return hidden_states
class FlaxUpBlockaD(nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = True
UpperCAmelCase__ : jnp.dtype = jnp.floataa
    def setup( self ) -> None:
a__ = []
for i in range(self.num_layers ):
a__ = self.in_channels if (i == self.num_layers - 1) else self.out_channels
a__ = self.prev_output_channel if i == 0 else self.out_channels
a__ = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels ,out_channels=self.out_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(__snake_case )
a__ = resnets
if self.add_upsample:
a__ = FlaxUpsampleaD(self.out_channels ,dtype=self.dtype )
def __call__( self :Optional[int] ,__snake_case :List[Any] ,__snake_case :int ,__snake_case :Optional[Any] ,__snake_case :Optional[Any]=True ) -> List[str]:
for resnet in self.resnets:
# pop res hidden states
a__ = res_hidden_states_tuple[-1]
a__ = res_hidden_states_tuple[:-1]
a__ = jnp.concatenate((hidden_states, res_hidden_states) ,axis=-1 )
a__ = resnet(__snake_case ,__snake_case ,deterministic=__snake_case )
if self.add_upsample:
a__ = self.upsamplers_a(__snake_case )
return hidden_states
class FlaxUNetMidBlockaDCrossAttn(nn.Module ):
UpperCAmelCase__ : int
UpperCAmelCase__ : float = 0.0
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : int = 1
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : bool = False
UpperCAmelCase__ : jnp.dtype = jnp.floataa
    def setup( self ) -> None:
# there is always at least one resnet
a__ = [
FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
]
a__ = []
for _ in range(self.num_layers ):
a__ = FlaxTransformeraDModel(
in_channels=self.in_channels ,n_heads=self.num_attention_heads ,d_head=self.in_channels // self.num_attention_heads ,depth=1 ,use_linear_projection=self.use_linear_projection ,use_memory_efficient_attention=self.use_memory_efficient_attention ,dtype=self.dtype ,)
attentions.append(__snake_case )
a__ = FlaxResnetBlockaD(
in_channels=self.in_channels ,out_channels=self.in_channels ,dropout_prob=self.dropout ,dtype=self.dtype ,)
resnets.append(__snake_case )
a__ = resnets
a__ = attentions
def __call__( self :Optional[Any] ,__snake_case :Union[str, Any] ,__snake_case :List[str] ,__snake_case :int ,__snake_case :int=True ) -> str:
a__ = self.resnets[0](__snake_case ,__snake_case )
for attn, resnet in zip(self.attentions ,self.resnets[1:] ):
a__ = attn(__snake_case ,__snake_case ,deterministic=__snake_case )
a__ = resnet(__snake_case ,__snake_case ,deterministic=__snake_case )
return hidden_states
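# A minimal shape-level sketch (illustrative; it assumes the `a__`-mangled
# assignments in the setup methods above stand for `self.resnets` /
# `self.attentions` / `self.downsamplers_a` as in the original diffusers Flax
# blocks, and that "aD" throughout this file is a mangling of "2D"):
#
#     import jax
#     block = FlaxDownBlockaD(in_channels=32, out_channels=32)
#     sample = jnp.ones((1, 16, 16, 32))   # diffusers' Flax models are channels-last (NHWC)
#     temb = jnp.ones((1, 128))            # time embedding
#     params = block.init(jax.random.PRNGKey(0), sample, temb)
#     hidden, skips = block.apply(params, sample, temb)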
| 335 | 0 |
'''simple docstring'''
from math import ceil
def assert_device_map(device_map: dict, num_blocks: int) -> None:
    blocks = list(range(0, num_blocks))

    device_map_blocks = [item for sublist in list(device_map.values()) for item in sublist]

    # Duplicate check
    duplicate_blocks = []
    for i in device_map_blocks:
        if device_map_blocks.count(i) > 1 and i not in duplicate_blocks:
            duplicate_blocks.append(i)
    # Missing blocks
    missing_blocks = [i for i in blocks if i not in device_map_blocks]
    extra_blocks = [i for i in device_map_blocks if i not in blocks]

    if len(duplicate_blocks) != 0:
        raise ValueError(
            """Duplicate attention blocks specified in device_map. Attention blocks must be specified to one device."""
            """ These attention blocks were specified more than once: """ + str(duplicate_blocks) )
    if len(missing_blocks) != 0:
        raise ValueError(
            """There are attention blocks for this model that are not specified in the device_map. Add these attention """
            """blocks to a device on the device_map: """ + str(missing_blocks) )
    if len(extra_blocks) != 0:
        raise ValueError(
            """The device_map contains more attention blocks than this model has. Remove these from the device_map:"""
            + str(extra_blocks) )
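# A minimal sketch (hypothetical 4-block model split over two devices; the
# maps below are illustrative):
#
#     assert_device_map({0: [0, 1], 1: [2, 3]}, num_blocks=4)   # passes silently
#     assert_device_map({0: [0, 1], 1: [1, 2]}, num_blocks=4)   # raises ValueError (block 1 duplicated)
#     get_device_map(12, devices=[0, 1, 2, 3])                  # (defined below)
#     # -> {0: [0, 1, 2], 1: [3, 4, 5], 2: [6, 7, 8], 3: [9, 10, 11]}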
def get_device_map(n_layers: int, devices: list) -> dict:
    """Returns a dictionary of layers distributed evenly across all devices."""
    layers = list(range(n_layers))
    n_blocks = int(ceil(n_layers / len(devices)))
    layers_list = [layers[i : i + n_blocks] for i in range(0, n_layers, n_blocks)]

    return dict(zip(devices, layers_list)) | 178 |
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
_DESCRIPTION = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _UpperCamelCase ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
"""https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html"""
] , )
    def _get_feature_types( self ):
"""simple docstring"""
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value("""float""" ) ),
"references": datasets.Sequence(datasets.Value("""float""" ) ),
}
else:
return {
"predictions": datasets.Value("""float""" ),
"references": datasets.Value("""float""" ),
}
    def _compute( self , predictions , references , sample_weight=None , multioutput="uniform_average" , squared=True ):
        """simple docstring"""
        mse = mean_squared_error(
            references , predictions , sample_weight=sample_weight , multioutput=multioutput , squared=squared )
        return {"mse": mse} | 178 | 1 |
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class BetterTransformerIntegrationTest(unittest.TestCase):
    def test_transform_and_reverse(self):
        """Round-trip a model through BetterTransformer and back; outputs must match."""
        model_id = "hf-internal-testing/tiny-random-t5"
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        inp = tokenizer("This is me", return_tensors="pt")
        model = model.to_bettertransformer()
        self.assertTrue(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        output = model.generate(**inp)
        model = model.reverse_bettertransformer()
        self.assertFalse(any("BetterTransformer" in mod.__class__.__name__ for _, mod in model.named_modules()))
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(tmpdirname)
            model_reloaded = AutoModelForSeq2SeqLM.from_pretrained(tmpdirname)
            self.assertFalse(
                any("BetterTransformer" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules())
            )
            output_from_pretrained = model_reloaded.generate(**inp)
            self.assertTrue(torch.allclose(output, output_from_pretrained))

    def test_error_save_pretrained(self):
        """`save_pretrained` must refuse a model still in BetterTransformer form."""
        model_id = "hf-internal-testing/tiny-random-t5"
        model = AutoModelForSeq2SeqLM.from_pretrained(model_id)
        model = model.to_bettertransformer()
        with tempfile.TemporaryDirectory() as tmpdirname:
            with self.assertRaises(ValueError):
                model.save_pretrained(tmpdirname)
            model = model.reverse_bettertransformer()
            model.save_pretrained(tmpdirname)
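# A minimal sketch of the same round-trip outside the test harness (model id
# illustrative):
#
#     model = AutoModelForSeq2SeqLM.from_pretrained("t5-small")
#     model = model.to_bettertransformer()        # fused attention kernels via optimum
#     model = model.reverse_bettertransformer()   # restore canonical modules before saving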
| 612 |
def check_bouncy(n: int) -> bool:
    """A number is bouncy when its digits are neither entirely non-decreasing
    nor entirely non-increasing."""
    if not isinstance(n, int):
        raise ValueError("check_bouncy() accepts only integer arguments")
    str_n = str(n)
    sorted_str_n = "".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99) -> int:
    """Return the least number at which the proportion of bouncy numbers
    first reaches `percent` (Project Euler 112)."""
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print(F"""{solution(99)}""")
| 612 | 1 |
import unittest
from transformers import JukeboxTokenizer
from transformers.testing_utils import require_torch
class __snake_case ( unittest.TestCase ):
    tokenizer_class = JukeboxTokenizer
    metas = {
"""artist""": """Zac Brown Band""",
"""genres""": """Country""",
"""lyrics""": """I met a traveller from an antique land,
Who said \"Two vast and trunkless legs of stone
Stand in the desert. . . . Near them, on the sand,
Half sunk a shattered visage lies, whose frown,
And wrinkled lip, and sneer of cold command,
Tell that its sculptor well those passions read
Which yet survive, stamped on these lifeless things,
The hand that mocked them, and the heart that fed;
And on the pedestal, these words appear:
My name is Ozymandias, King of Kings;
Look on my Works, ye Mighty, and despair!
Nothing beside remains. Round the decay
Of that colossal Wreck, boundless and bare
The lone and level sands stretch far away
""",
}
@require_torch
    def test_1b_lyrics_tokenizer( self ) -> None:
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-1b-lyrics' )
        tokens = tokenizer(**self.metas )['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 7169, 507, 9, 76, 39, 31, 46, 76, 27,
76, 46, 44, 27, 48, 31, 38, 38, 31, 44, 76, 32,
44, 41, 39, 76, 27, 40, 76, 27, 40, 46, 35, 43,
47, 31, 76, 38, 27, 40, 30, 64, 78, 76, 76, 76,
76, 76, 76, 76, 76, 23, 34, 41, 76, 45, 27, 35,
30, 76, 71, 20, 49, 41, 76, 48, 27, 45, 46, 76,
27, 40, 30, 76, 46, 44, 47, 40, 37, 38, 31, 45,
45, 76, 38, 31, 33, 45, 76, 41, 32, 76, 45, 46,
41, 40, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
19, 46, 27, 40, 30, 76, 35, 40, 76, 46, 34, 31,
76, 30, 31, 45, 31, 44, 46, 63, 76, 63, 76, 63,
76, 63, 76, 14, 31, 27, 44, 76, 46, 34, 31, 39,
64, 76, 41, 40, 76, 46, 34, 31, 76, 45, 27, 40,
30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76, 8,
27, 38, 32, 76, 45, 47, 40, 37, 76, 27, 76, 45,
34, 27, 46, 46, 31, 44, 31, 30, 76, 48, 35, 45,
27, 33, 31, 76, 38, 35, 31, 45, 64, 76, 49, 34,
41, 45, 31, 76, 32, 44, 41, 49, 40, 64, 78, 76,
76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76, 49,
44, 35, 40, 37, 38, 31, 30, 76, 38, 35, 42, 64,
76, 27, 40, 30, 76, 45, 40, 31, 31, 44, 76, 41,
32, 76, 29, 41, 38, 30, 76, 29, 41, 39, 39, 27,
40, 30, 64, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 31, 38, 38, 76, 46, 34, 27, 46, 76, 35, 46,
45, 76, 45, 29, 47, 38, 42, 46, 41, 44, 76, 49,
31, 38, 38, 76, 46, 34, 41, 45, 31, 76, 42, 27,
45, 45, 35, 41, 40, 45, 76, 44, 31, 27, 30, 78,
76, 76, 76, 76, 76, 76, 76, 76, 23, 34, 35, 29,
34, 76, 51, 31, 46, 76, 45, 47, 44, 48, 35, 48,
31, 64, 76, 45, 46, 27, 39, 42, 31, 30, 76, 41,
40, 76, 46, 34, 31, 45, 31, 76, 38, 35, 32, 31,
38, 31, 45, 45, 76, 46, 34, 35, 40, 33, 45, 64,
78, 76, 76, 76, 76, 76, 76, 76, 76, 20, 34, 31,
76, 34, 27, 40, 30, 76, 46, 34, 27, 46, 76, 39,
41, 29, 37, 31, 30, 76, 46, 34, 31, 39, 64, 76,
27, 40, 30, 76, 46, 34, 31, 76, 34, 31, 27, 44,
46, 76, 46, 34, 27, 46, 76, 32, 31, 30, 66, 78,
76, 76, 76, 76, 76, 76, 76, 76, 1, 40, 30, 76,
41, 40, 76, 46, 34, 31, 76, 42, 31, 30, 31, 45,
46, 27, 38, 64, 76, 46, 34, 31, 45, 31, 76, 49,
41, 44, 30, 45, 76, 27, 42, 42, 31, 27, 44, 65,
78, 76, 76, 76, 76, 76, 76, 76, 76, 13, 51, 76,
40, 27, 39, 31, 76, 35, 45, 76, 15, 52, 51, 39,
27, 40, 30, 35, 27, 45, 64, 76, 11, 35, 40, 33,
76, 41, 32, 76, 11, 35, 40, 33, 45, 66, 78, 76,
76, 76, 76, 76, 76, 76, 76, 12, 41, 41, 37, 76,
41, 40, 76, 39, 51, 76, 23, 41, 44, 37, 45, 64,
76, 51, 31, 76, 13, 35, 33, 34, 46, 51, 64, 76,
27, 40, 30, 76, 30, 31, 45, 42, 27, 35, 44, 67,
78, 76, 76, 76, 76, 76, 76, 76, 76, 14, 41, 46,
34, 35, 40, 33, 76, 28, 31, 45, 35, 30, 31, 76,
44, 31, 39, 27, 35, 40, 45, 63, 76, 18, 41, 47,
40, 30, 76, 46, 34, 31, 76, 30, 31, 29, 27, 51,
78, 76, 76, 76, 76, 76, 76, 76, 76, 15, 32, 76,
46, 34, 27, 46, 76, 29, 41, 38, 41, 45, 45, 27,
38, 76, 23, 44, 31, 29, 37, 64, 76, 28, 41, 47,
40, 30, 38, 31, 45, 45, 76, 27, 40, 30, 76, 28,
27, 44, 31, 78, 76, 76, 76, 76, 76, 76, 76, 76,
20, 34, 31, 76, 38, 41, 40, 31, 76, 27, 40, 30,
76, 38, 31, 48, 31, 38, 76, 45, 27, 40, 30, 45,
76, 45, 46, 44, 31, 46, 29, 34, 76, 32, 27, 44,
76, 27, 49, 27, 51, 78, 76, 76, 76, 76, 76, 76,
76, 76]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
torch.tensor([[0, 0, 0, 1069, 11]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
@require_torch
    def test_5b_lyrics_tokenizer( self ) -> None:
'''simple docstring'''
import torch
        tokenizer = JukeboxTokenizer.from_pretrained('openai/jukebox-5b-lyrics' )
        tokens = tokenizer(**self.metas )['input_ids']
        # fmt: off
        EXPECTED_OUTPUT = [
torch.tensor([[
0, 0, 0, 1069, 11, -1, -1, -1, -1, 9, 77, 39,
31, 46, 77, 27, 77, 46, 44, 27, 48, 31, 38, 38,
31, 44, 77, 32, 44, 41, 39, 77, 27, 40, 77, 27,
40, 46, 35, 43, 47, 31, 77, 38, 27, 40, 30, 64,
79, 77, 77, 77, 77, 77, 77, 77, 77, 23, 34, 41,
77, 45, 27, 35, 30, 77, 72, 20, 49, 41, 77, 48,
27, 45, 46, 77, 27, 40, 30, 77, 46, 44, 47, 40,
37, 38, 31, 45, 45, 77, 38, 31, 33, 45, 77, 41,
32, 77, 45, 46, 41, 40, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 19, 46, 27, 40, 30, 77, 35, 40,
77, 46, 34, 31, 77, 30, 31, 45, 31, 44, 46, 63,
77, 63, 77, 63, 77, 63, 77, 14, 31, 27, 44, 77,
46, 34, 31, 39, 64, 77, 41, 40, 77, 46, 34, 31,
77, 45, 27, 40, 30, 64, 79, 77, 77, 77, 77, 77,
77, 77, 77, 8, 27, 38, 32, 77, 45, 47, 40, 37,
77, 27, 77, 45, 34, 27, 46, 46, 31, 44, 31, 30,
77, 48, 35, 45, 27, 33, 31, 77, 38, 35, 31, 45,
64, 77, 49, 34, 41, 45, 31, 77, 32, 44, 41, 49,
40, 64, 79, 77, 77, 77, 77, 77, 77, 77, 77, 1,
40, 30, 77, 49, 44, 35, 40, 37, 38, 31, 30, 77,
38, 35, 42, 64, 77, 27, 40, 30, 77, 45, 40, 31,
31, 44, 77, 41, 32, 77, 29, 41, 38, 30, 77, 29,
41, 39, 39, 27, 40, 30, 64, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 31, 38, 38, 77, 46, 34, 27,
46, 77, 35, 46, 45, 77, 45, 29, 47, 38, 42, 46,
41, 44, 77, 49, 31, 38, 38, 77, 46, 34, 41, 45,
31, 77, 42, 27, 45, 45, 35, 41, 40, 45, 77, 44,
31, 27, 30, 79, 77, 77, 77, 77, 77, 77, 77, 77,
23, 34, 35, 29, 34, 77, 51, 31, 46, 77, 45, 47,
44, 48, 35, 48, 31, 64, 77, 45, 46, 27, 39, 42,
31, 30, 77, 41, 40, 77, 46, 34, 31, 45, 31, 77,
38, 35, 32, 31, 38, 31, 45, 45, 77, 46, 34, 35,
40, 33, 45, 64, 79, 77, 77, 77, 77, 77, 77, 77,
77, 20, 34, 31, 77, 34, 27, 40, 30, 77, 46, 34,
27, 46, 77, 39, 41, 29, 37, 31, 30, 77, 46, 34,
31, 39, 64, 77, 27, 40, 30, 77, 46, 34, 31, 77,
34, 31, 27, 44, 46, 77, 46, 34, 27, 46, 77, 32,
31, 30, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77,
1, 40, 30, 77, 41, 40, 77, 46, 34, 31, 77, 42,
31, 30, 31, 45, 46, 27, 38, 64, 77, 46, 34, 31,
45, 31, 77, 49, 41, 44, 30, 45, 77, 27, 42, 42,
31, 27, 44, 65, 79, 77, 77, 77, 77, 77, 77, 77,
77, 13, 51, 77, 40, 27, 39, 31, 77, 35, 45, 77,
15, 52, 51, 39, 27, 40, 30, 35, 27, 45, 64, 77,
11, 35, 40, 33, 77, 41, 32, 77, 11, 35, 40, 33,
45, 66, 79, 77, 77, 77, 77, 77, 77, 77, 77, 12,
41, 41, 37, 77, 41, 40, 77, 39, 51, 77, 23, 41,
44, 37, 45, 64, 77, 51, 31, 77, 13, 35, 33, 34,
46, 51, 64, 77, 27, 40, 30, 77, 30, 31, 45, 42,
27, 35, 44, 67, 79, 77, 77, 77, 77, 77, 77, 77,
77, 14, 41, 46, 34, 35, 40, 33, 77, 28, 31, 45,
35, 30, 31, 77, 44, 31, 39, 27, 35, 40, 45, 63,
77, 18, 41, 47, 40, 30, 77, 46, 34, 31, 77, 30,
31, 29, 27, 51, 79, 77, 77, 77, 77, 77, 77, 77,
77, 15, 32, 77, 46, 34, 27, 46, 77, 29, 41, 38,
41, 45, 45, 27, 38, 77, 23, 44, 31, 29, 37, 64,
77, 28, 41, 47, 40, 30, 38, 31, 45, 45, 77, 27,
40, 30, 77, 28, 27, 44, 31, 79, 77, 77, 77, 77,
77, 77, 77, 77, 20, 34, 31, 77, 38, 41, 40, 31,
77, 27, 40, 30, 77, 38, 31, 48, 31, 38, 77, 45,
27, 40, 30, 45, 77, 45, 46, 44, 31, 46, 29, 34,
77, 32, 27, 44, 77, 27, 49, 27, 51, 79, 77, 77,
77, 77, 77, 77, 77, 77]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
torch.tensor([[0, 0, 0, 1069, 11, -1, -1, -1, -1]] ),
]
# fmt: on
self.assertTrue(torch.allclose(tokens[0] , EXPECTED_OUTPUT[0] ) )
self.assertTrue(torch.allclose(tokens[1] , EXPECTED_OUTPUT[1] ) )
self.assertTrue(torch.allclose(tokens[2] , EXPECTED_OUTPUT[2] ) )
| 699 | from .data_collator import (
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForSeqaSeq,
DataCollatorForSOP,
DataCollatorForTokenClassification,
DataCollatorForWholeWordMask,
DataCollatorWithPadding,
DefaultDataCollator,
default_data_collator,
)
from .metrics import glue_compute_metrics, xnli_compute_metrics
from .processors import (
DataProcessor,
InputExample,
InputFeatures,
SingleSentenceClassificationProcessor,
SquadExample,
SquadFeatures,
    SquadV1Processor,
    SquadV2Processor,
glue_convert_examples_to_features,
glue_output_modes,
glue_processors,
glue_tasks_num_labels,
squad_convert_examples_to_features,
xnli_output_modes,
xnli_processors,
xnli_tasks_num_labels,
)
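# A minimal sketch of the GLUE helpers re-exported above (model id and example
# text are illustrative):
#
#     from transformers import AutoTokenizer
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     examples = [InputExample(guid="0", text_a="A man is eating.", text_b="Someone eats.", label="entailment")]
#     features = glue_convert_examples_to_features(examples, tokenizer, max_length=128, task="mnli")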
| 699 | 1 |
'''simple docstring'''
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
"""simple docstring"""
def __init__( self : Optional[int] , __snake_case : Dict , __snake_case : Optional[int]=1_00 , __snake_case : Tuple=13 , __snake_case : List[Any]=30 , __snake_case : Union[str, Any]=2 , __snake_case : Tuple=3 , __snake_case : Union[str, Any]=True , __snake_case : List[str]=True , __snake_case : Optional[int]=32 , __snake_case : List[str]=4 , __snake_case : Optional[Any]=4 , __snake_case : str=37 , __snake_case : List[Any]="gelu" , __snake_case : Union[str, Any]=0.1 , __snake_case : str=0.1 , __snake_case : Union[str, Any]=10 , __snake_case : Any=0.02 , __snake_case : Tuple=3 , __snake_case : List[str]=None , __snake_case : Dict=[0, 1, 2, 3] , )-> str:
snake_case = parent
snake_case = 1_00
snake_case = batch_size
snake_case = image_size
snake_case = patch_size
snake_case = num_channels
snake_case = is_training
snake_case = use_labels
snake_case = hidden_size
snake_case = num_hidden_layers
snake_case = num_attention_heads
snake_case = intermediate_size
snake_case = hidden_act
snake_case = hidden_dropout_prob
snake_case = attention_probs_dropout_prob
snake_case = type_sequence_label_size
snake_case = initializer_range
snake_case = scope
snake_case = out_indices
snake_case = num_labels
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case = (image_size // patch_size) ** 2
snake_case = num_patches + 1
    def prepare_config_and_inputs( self ):
snake_case = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case = None
snake_case = None
if self.use_labels:
snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case = self.get_config()
return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__snake_case , initializer_range=self.initializer_range , out_indices=self.out_indices , )
def lowerCAmelCase ( self : Optional[int] , __snake_case : List[Any] , __snake_case : Optional[int] , __snake_case : Any , __snake_case : Dict )-> List[str]:
snake_case = BeitModel(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase ( self : str , __snake_case : int , __snake_case : List[Any] , __snake_case : Dict , __snake_case : Tuple )-> int:
snake_case = BeitForMaskedImageModeling(config=__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def lowerCAmelCase ( self : List[Any] , __snake_case : str , __snake_case : int , __snake_case : Any , __snake_case : Union[str, Any] )-> Any:
snake_case = self.type_sequence_label_size
snake_case = BeitForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case = 1
snake_case = BeitForImageClassification(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase ( self : Tuple , __snake_case : Optional[int] , __snake_case : Tuple , __snake_case : str , __snake_case : str )-> int:
snake_case = self.num_labels
snake_case = BeitForSemanticSegmentation(__snake_case )
model.to(__snake_case )
model.eval()
snake_case = model(__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
snake_case = model(__snake_case , labels=__snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2) )
    def prepare_config_and_inputs_for_common( self ):
snake_case = self.prepare_config_and_inputs()
snake_case , snake_case , snake_case , snake_case = config_and_inputs
snake_case = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class BeitModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
(BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"feature-extraction": BeitModel,
"image-classification": BeitForImageClassification,
"image-segmentation": BeitForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp( self ):
snake_case = BeitModelTester(self )
snake_case = ConfigTester(self , config_class=__snake_case , has_text_modality=__snake_case , hidden_size=37 )
def lowerCAmelCase ( self : List[Any] )-> List[Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def lowerCAmelCase ( self : Any )-> Tuple:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""BEiT has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`""" )
def lowerCAmelCase ( self : str )-> Optional[Any]:
pass
def lowerCAmelCase ( self : Tuple )-> Optional[int]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__snake_case , nn.Linear ) )
def lowerCAmelCase ( self : Tuple )-> List[Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case = model_class(__snake_case )
snake_case = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case = [*signature.parameters.keys()]
snake_case = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __snake_case )
def lowerCAmelCase ( self : int )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__snake_case )
def lowerCAmelCase ( self : Dict )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> int:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__snake_case )
def lowerCAmelCase ( self : List[Any] )-> List[str]:
snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__snake_case )
def lowerCAmelCase ( self : Optional[int] )-> Optional[int]:
if not self.model_tester.is_training:
return
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if model_class in [*get_values(__snake_case ), BeitForMaskedImageModeling]:
continue
snake_case = model_class(__snake_case )
model.to(__snake_case )
model.train()
snake_case = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
snake_case = model(**__snake_case ).loss
loss.backward()
def lowerCAmelCase ( self : List[str] )-> Dict:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case = False
snake_case = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(__snake_case ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
snake_case = model_class(__snake_case )
model.gradient_checkpointing_enable()
model.to(__snake_case )
model.train()
snake_case = self._prepare_for_class(__snake_case , __snake_case , return_labels=__snake_case )
snake_case = model(**__snake_case ).loss
loss.backward()
def lowerCAmelCase ( self : Tuple )-> Union[str, Any]:
snake_case , snake_case = self.model_tester.prepare_config_and_inputs_for_common()
snake_case = _config_zero_init(__snake_case )
for model_class in self.all_model_classes:
snake_case = model_class(config=__snake_case )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@slow
def lowerCAmelCase ( self : Optional[Any] )-> str:
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case = BeitModel.from_pretrained(__snake_case )
self.assertIsNotNone(__snake_case )
def __lowerCamelCase ( ) -> Union[str, Any]:
snake_case = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class BeitModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCAmelCase ( self : List[Any] )-> List[str]:
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
@slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)
        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)
        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))
        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
@slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")
        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
    def test_post_processing_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        outputs.logits = outputs.logits.detach().cpu()

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs, target_sizes=[(500, 300)])
        expected_shape = torch.Size((500, 300))
        self.assertEqual(segmentation[0].shape, expected_shape)

        segmentation = image_processor.post_process_semantic_segmentation(outputs=outputs)
        expected_shape = torch.Size((160, 160))
        self.assertEqual(segmentation[0].shape, expected_shape)
| 369 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
UpperCAmelCase_ = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
UpperCAmelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
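# ---------------------------------------------------------------------------
# Hedged usage sketch (separate script, assumes an installed `transformers`
# with torch): `_LazyModule` defers the heavy `modeling_clap` import until an
# attribute from it is first accessed, which keeps `import transformers` fast.
#
#     import transformers.models.clap as clap
#     processor_cls = clap.ClapProcessor   # triggers loading processing_clap
#     model_cls = clap.ClapModel           # triggers loading modeling_clap
# ---------------------------------------------------------------------------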
| 539 | 0 |
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
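# Example of the `step_rules` grammar parsed above (hedged illustration): the
# string "1:10,2:1,3:0.1,0.01" yields a multiplier of 10 for steps < 1, 1 for
# steps < 2, 0.1 for steps < 3, and 0.01 for every later step (the trailing
# bare number becomes `last_lr_multiple`).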
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
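# ---------------------------------------------------------------------------
# Hedged usage sketch for `get_scheduler` (assumes torch is installed; the
# guard keeps this demo out of library imports): a linear schedule warms the
# LR from 0 up to the optimizer's 1e-3 over 10 steps, then decays it to 0 by
# step 100.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optim = torch.optim.AdamW(params, lr=1e-3)
    lr_scheduler = get_scheduler("linear", optim, num_warmup_steps=10, num_training_steps=100)
    for _ in range(3):
        optim.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())  # still warming up: [3e-4]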
 | 298 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from=None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]
    return values
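# Hedged usage sketch (only meaningful inside the diffusers package, where the
# relative `__version__` import above resolves; the far-future version string
# avoids tripping the removal check):
#
#     def __call__(self, height=None, **kwargs):
#         # Pop a renamed kwarg out of `kwargs` and warn the caller once;
#         # `deprecate` hands the old value back.
#         width = deprecate("width", "999.0.0", "Use `sample_size` instead.", take_from=kwargs)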
 | 298 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class TFViTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = TFViTModel(config=config)
        result = model(pixel_values, training=False)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        seq_length = (image_size // self.patch_size) ** 2 + 1
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, seq_length, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFViTForImageClassification(config)
        result = model(pixel_values, labels=labels, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # Test with an image with different size than the one specified in config.
        image_size = self.image_size // 2
        pixel_values = pixel_values[:, :, :image_size, :image_size]
        result = model(pixel_values, interpolate_pos_encoding=True, training=False)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_tf
class TFViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
        if is_tf_available()
        else {}
    )

    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_graph_mode_with_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Layer))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        model = TFViTModel.from_pretrained("google/vit-base-patch16-224")
        self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_tf
@require_vision
class TFViTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None
@slow
    def test_inference_image_classification_head(self):
        model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-0.2744, 0.8215, -0.0836])
        tf.debugging.assert_near(outputs.logits[0, :3], expected_slice, atol=1e-4)
 | 67 |
def join(separator: str, separated: list[str]) -> str:
    joined: str = ""
    for word_or_phrase in separated:
        if not isinstance(word_or_phrase, str):
            raise Exception("join() accepts only strings to be joined")
        joined += word_or_phrase + separator
    return joined.strip(separator)
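# Quick self-checks (hedged additions, run on import; they mirror the doctests
# that `testmod()` below would otherwise exercise):
assert join("", ["a", "b", "c", "d"]) == "abcd"
assert join("#", ["a", "b", "c", "d"]) == "a#b#c#d"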
if __name__ == "__main__":
from doctest import testmod
testmod()
| 509 | 0 |
"""simple docstring"""
from __future__ import annotations
def mean(nums: list[float]) -> float:
    if not nums:
        raise ValueError("List is empty")
    return sum(nums) / len(nums)
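# Quick self-check (hedged addition): the mean of 3, 6 and 9 is 6.
assert mean([3, 6, 9]) == 6.0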
if __name__ == "__main__":
import doctest
doctest.testmod()
 | 711 |
"""simple docstring"""
__author__ = "Alexander Joslin"
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    operators = {"*": op.mul, "/": op.truediv, "+": op.add, "-": op.sub}

    operand_stack = Stack()
    operator_stack = Stack()

    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            num1 = operand_stack.peek()
            operand_stack.pop()
            num2 = operand_stack.peek()
            operand_stack.pop()

            total = operators[opr](num2, num1)
            operand_stack.push(total)

    # RULE 5
    return operand_stack.peek()
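# Hedged extra check: operand order matters for non-commutative operators; the
# inner "(2 - 1)" must reduce to 1 before the outer subtraction runs.
assert dijkstras_two_stack_algorithm("(9 - (2 - 1))") == 8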
if __name__ == "__main__":
    equation = "(5 + ((4 * 2) * (2 + 3)))"
# answer = 45
print(F'''{equation} = {dijkstras_two_stack_algorithm(equation)}''')
| 67 | 0 |
from __future__ import annotations
def generate_sum_of_subsets_soln(nums: list[int], max_sum: int) -> list[list[int]]:
    result: list[list[int]] = []
    path: list[int] = []
    num_index = 0
    remaining_nums_sum = sum(nums)
    create_state_space_tree(nums, max_sum, num_index, path, result, remaining_nums_sum)
    return result
def create_state_space_tree(
    nums: list[int],
    max_sum: int,
    num_index: int,
    path: list[int],
    result: list[list[int]],
    remaining_nums_sum: int,
) -> None:
    if sum(path) > max_sum or (remaining_nums_sum + sum(path)) < max_sum:
        return
    if sum(path) == max_sum:
        result.append(path)
        return
    for index in range(num_index, len(nums)):
        create_state_space_tree(
            nums, max_sum, index + 1, [*path, nums[index]], result, remaining_nums_sum - nums[index]
        )
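# Worked pruning example (hedged): with nums=[3, 34, 4, 12, 5, 2] and max_sum=9,
# the partial path [3, 34] is cut immediately because sum(path) = 37 already
# exceeds max_sum, so none of its extensions are ever generated.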
nums = [3, 34, 4, 12, 5, 2]
max_sum = 9
result = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 604 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'alibaba-damo/mgp-str-base': 'https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json',
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range
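# Hedged usage sketch (separate script; requires an installed `transformers`):
#
#     from transformers import MgpstrConfig
#     config = MgpstrConfig()          # defaults mirror "alibaba-damo/mgp-str-base"
#     print(config.max_token_length)   # -> 27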
| 224 | 0 |
from ..utils import DummyObject, requires_backends
class TFGPT2Tokenizer(metaclass=DummyObject):
    _backends = ["keras_nlp"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["keras_nlp"])
| 447 |
from math import sqrt
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def solution(nth: int = 10001) -> int:
    count = 0
    number = 1
    while count != nth and number < 3:
        number += 1
        if is_prime(number):
            count += 1
    while count != nth:
        number += 2
        if is_prime(number):
            count += 1
    return number
if __name__ == "__main__":
print(f'''{solution() = }''')
| 447 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_instructblip''': [
'''INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''InstructBlipConfig''',
'''InstructBlipQFormerConfig''',
'''InstructBlipVisionConfig''',
],
'''processing_instructblip''': ['''InstructBlipProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_instructblip"] = [
'''INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''InstructBlipQFormerModel''',
'''InstructBlipPreTrainedModel''',
'''InstructBlipForConditionalGeneration''',
'''InstructBlipVisionModel''',
]
if TYPE_CHECKING:
from .configuration_instructblip import (
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
InstructBlipConfig,
InstructBlipQFormerConfig,
InstructBlipVisionConfig,
)
from .processing_instructblip import InstructBlipProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_instructblip import (
INSTRUCTBLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
InstructBlipForConditionalGeneration,
InstructBlipPreTrainedModel,
InstructBlipQFormerModel,
InstructBlipVisionModel,
)
else:
import sys
__lowercase : List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 36 |
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
__a = logging.get_logger(__name__)
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Union[str, Any]:
UpperCAmelCase = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm1.weight""", F"""encoder.encoder.layer.{i}.layernorm_before.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm1.bias""", F"""encoder.encoder.layer.{i}.layernorm_before.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.weight""", F"""encoder.encoder.layer.{i}.attention.output.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.attn.proj.bias""", F"""encoder.encoder.layer.{i}.attention.output.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.norm2.weight""", F"""encoder.encoder.layer.{i}.layernorm_after.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.norm2.bias""", F"""encoder.encoder.layer.{i}.layernorm_after.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.weight""", F"""encoder.encoder.layer.{i}.intermediate.dense.weight""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc1.bias""", F"""encoder.encoder.layer.{i}.intermediate.dense.bias""") )
rename_keys.append(
(F"""encoder.deit.blocks.{i}.mlp.fc2.weight""", F"""encoder.encoder.layer.{i}.output.dense.weight""") )
rename_keys.append((F"""encoder.deit.blocks.{i}.mlp.fc2.bias""", F"""encoder.encoder.layer.{i}.output.dense.bias""") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
("""encoder.deit.cls_token""", """encoder.embeddings.cls_token"""),
("""encoder.deit.pos_embed""", """encoder.embeddings.position_embeddings"""),
("""encoder.deit.patch_embed.proj.weight""", """encoder.embeddings.patch_embeddings.projection.weight"""),
("""encoder.deit.patch_embed.proj.bias""", """encoder.embeddings.patch_embeddings.projection.bias"""),
("""encoder.deit.norm.weight""", """encoder.layernorm.weight"""),
("""encoder.deit.norm.bias""", """encoder.layernorm.bias"""),
] )
return rename_keys
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Any:
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
UpperCAmelCase = state_dict.pop(F"""encoder.deit.blocks.{i}.attn.qkv.weight""" )
UpperCAmelCase = in_proj_weight[
: encoder_config.hidden_size, :
]
UpperCAmelCase = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
UpperCAmelCase = in_proj_weight[
-encoder_config.hidden_size :, :
]
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) ->Optional[Any]:
UpperCAmelCase = dct.pop(lowerCAmelCase_ )
UpperCAmelCase = val
def _UpperCamelCase ( lowerCAmelCase_ ) ->Any:
if "handwritten" in checkpoint_url:
UpperCAmelCase = """https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg""" # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase = """https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg"""
UpperCAmelCase = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw ).convert("""RGB""" )
return im
@torch.no_grad()
def _UpperCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) ->Optional[int]:
UpperCAmelCase = ViTConfig(image_size=3_8_4 , qkv_bias=lowerCAmelCase_ )
UpperCAmelCase = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
UpperCAmelCase = 7_6_8
elif "large" in checkpoint_url:
# use ViT-large encoder
UpperCAmelCase = 1_0_2_4
UpperCAmelCase = 4_0_9_6
UpperCAmelCase = 2_4
UpperCAmelCase = 1_6
UpperCAmelCase = 1_0_2_4
else:
raise ValueError("""Should either find 'base' or 'large' in checkpoint URL""" )
# the large-printed + stage1 checkpoints uses sinusoidal position embeddings, no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
UpperCAmelCase = False
UpperCAmelCase = """relu"""
UpperCAmelCase = 1_0_2_4
UpperCAmelCase = True
UpperCAmelCase = False
UpperCAmelCase = False
# load HuggingFace model
UpperCAmelCase = ViTModel(lowerCAmelCase_ , add_pooling_layer=lowerCAmelCase_ )
UpperCAmelCase = TrOCRForCausalLM(lowerCAmelCase_ )
UpperCAmelCase = VisionEncoderDecoderModel(encoder=lowerCAmelCase_ , decoder=lowerCAmelCase_ )
model.eval()
# load state_dict of original model, rename some keys
UpperCAmelCase = torch.hub.load_state_dict_from_url(lowerCAmelCase_ , map_location="""cpu""" , check_hash=lowerCAmelCase_ )["""model"""]
UpperCAmelCase = create_rename_keys(lowerCAmelCase_ , lowerCAmelCase_ )
for src, dest in rename_keys:
rename_key(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
read_in_q_k_v(lowerCAmelCase_ , lowerCAmelCase_ )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
UpperCAmelCase = state_dict.pop(lowerCAmelCase_ )
if key.startswith("""decoder""" ) and "output_projection" not in key:
UpperCAmelCase = val
else:
UpperCAmelCase = val
# load state dict
model.load_state_dict(lowerCAmelCase_ )
# Check outputs on an image
UpperCAmelCase = ViTImageProcessor(size=encoder_config.image_size )
UpperCAmelCase = RobertaTokenizer.from_pretrained("""roberta-large""" )
UpperCAmelCase = TrOCRProcessor(lowerCAmelCase_ , lowerCAmelCase_ )
UpperCAmelCase = processor(images=prepare_img(lowerCAmelCase_ ) , return_tensors="""pt""" ).pixel_values
# verify logits
UpperCAmelCase = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
UpperCAmelCase = model(pixel_values=lowerCAmelCase_ , decoder_input_ids=lowerCAmelCase_ )
UpperCAmelCase = outputs.logits
UpperCAmelCase = torch.Size([1, 1, 5_0_2_6_5] )
if "trocr-base-handwritten" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
UpperCAmelCase = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :1_0] , lowerCAmelCase_ , atol=1e-3 ), "First elements of logits not as expected"
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(F"""Saving model to {pytorch_dump_folder_path}""" )
model.save_pretrained(lowerCAmelCase_ )
print(F"""Saving processor to {pytorch_dump_folder_path}""" )
processor.save_pretrained(lowerCAmelCase_ )
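# Example invocation (hedged; the script name and output path are illustrative,
# the checkpoint URL is the argparse default below):
#
#     python convert_trocr_unilm_to_pytorch.py \
#         --checkpoint_url https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt \
#         --pytorch_dump_folder_path ./trocr-base-handwritten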
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
__a = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
| 377 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class TimesformerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=10,
        num_channels=3,
        patch_size=2,
        num_frames=2,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        num_labels=10,
        initializer_range=0.02,
        attention_type="divided_space_time",
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.num_frames = num_frames
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.attention_type = attention_type
        self.initializer_range = initializer_range
        self.scope = scope
        self.num_labels = num_labels

        # in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
        self.num_patches_per_frame = (image_size // patch_size) ** 2
        self.seq_length = (num_frames) * self.num_patches_per_frame + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor(
            [self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size]
        )

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        config = TimesformerConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_frames=self.num_frames,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            attention_type=self.attention_type,
        )
        config.num_labels = self.num_labels
        return config
    def create_and_check_model(self, config, pixel_values, labels):
        model = TimesformerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_video_classification(self, config, pixel_values, labels):
        model = TimesformerForVideoClassification(config)
        model.to(torch_device)
        model.eval()

        result = model(pixel_values)

        # verify the logits shape
        expected_shape = torch.Size((self.batch_size, self.num_labels))
        self.parent.assertEqual(result.logits.shape, expected_shape)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class TimesformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = TimesformerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=TimesformerConfig, has_text_modality=False, hidden_size=37
        )

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = copy.deepcopy(inputs_dict)

        if return_labels:
            if model_class in get_values(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING):
                inputs_dict["labels"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )

        return inputs_dict
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="TimeSformer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_video_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_video_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TimesformerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_attention_outputs(self):
        if not self.has_attentions:
            pass
        else:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            for model_class in self.all_model_classes:
                seq_len = self.model_tester.seq_length
                num_frames = self.model_tester.num_frames

                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = False
                config.return_dict = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # check that output_attentions also work using config
                del inputs_dict["output_attentions"]
                config.output_attentions = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                attentions = outputs.attentions
                self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
                out_len = len(outputs)

                # Check attention is always last and order is fine
                inputs_dict["output_attentions"] = True
                inputs_dict["output_hidden_states"] = True
                model = model_class(config)
                model.to(torch_device)
                model.eval()
                with torch.no_grad():
                    outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(out_len + 1, len(outputs))

                self_attentions = outputs.attentions
                self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)

                # attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
                self.assertListEqual(
                    list(self_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1],
                )
    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states
            expected_num_layers = self.model_tester.num_hidden_layers + 1
            self.assertEqual(len(hidden_states), expected_num_layers)

            seq_length = self.model_tester.seq_length

            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [seq_length, self.model_tester.hidden_size],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)
def prepare_video():
    video_file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename="eating_spaghetti.npy", repo_type="dataset"
    )
    video = np.load(video_file)
    return list(video)
@require_torch
@require_vision
class TimesformerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_for_video_classification(self):
        model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400").to(
            torch_device
        )

        image_processor = self.default_image_processor
        video = prepare_video()
        inputs = image_processor(video[:8], return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 400))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.3016, -0.7713, -0.4205]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 719 |
from scipy.stats import pearsonr
import datasets
lowercase : Any = """
Pearson correlation coefficient and p-value for testing non-correlation.
The Pearson correlation coefficient measures the linear relationship between two datasets. The calculation of the p-value relies on the assumption that each dataset is normally distributed. Like other correlation coefficients, this one varies between -1 and +1 with 0 implying no correlation. Correlations of -1 or +1 imply an exact linear relationship. Positive correlations imply that as x increases, so does y. Negative correlations imply that as x increases, y decreases.
The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets.
"""
lowercase : int = """
Args:
predictions (`list` of `int`): Predicted class labels, as returned by a model.
references (`list` of `int`): Ground truth labels.
return_pvalue (`boolean`): If `True`, returns the p-value, along with the correlation coefficient. If `False`, returns only the correlation coefficient. Defaults to `False`.
Returns:
pearsonr (`float`): Pearson correlation coefficient. Minimum possible value is -1. Maximum possible value is 1. Values of 1 and -1 indicate exact linear positive and negative relationships, respectively. A value of 0 implies no correlation.
p-value (`float`): P-value, which roughly indicates the probability of an The p-value roughly indicates the probability of an uncorrelated system producing datasets that have a Pearson correlation at least as extreme as the one computed from these datasets. Minimum possible value is 0. Maximum possible value is 1. Higher values indicate higher probabilities.
Examples:
Example 1-A simple example using only predictions and references.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5])
>>> print(round(results['pearsonr'], 2))
-0.74
Example 2-The same as Example 1, but that also returns the `p-value`.
>>> pearsonr_metric = datasets.load_metric(\"pearsonr\")
>>> results = pearsonr_metric.compute(predictions=[10, 9, 2.5, 6, 4], references=[1, 2, 3, 4, 5], return_pvalue=True)
>>> print(sorted(list(results.keys())))
['p-value', 'pearsonr']
>>> print(round(results['pearsonr'], 2))
-0.74
>>> print(round(results['p-value'], 2))
0.15
"""
lowercase : int = """
@article{2020SciPy-NMeth,
author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and
Haberland, Matt and Reddy, Tyler and Cournapeau, David and
Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and
Bright, Jonathan and {van der Walt}, St{\'e}fan J. and
Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and
Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and
Kern, Robert and Larson, Eric and Carey, C J and
Polat, Ilhan and Feng, Yu and Moore, Eric W. and
{VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and
Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and
Harris, Charles R. and Archibald, Anne M. and
Ribeiro, Antonio H. and Pedregosa, Fabian and
{van Mulbregt}, Paul and {SciPy 1.0 Contributors}},
title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific
Computing in Python}},
journal = {Nature Methods},
year = {2020},
volume = {17},
pages = {261--272},
adsurl = {https://rdcu.be/b08Wh},
doi = {10.1038/s41592-019-0686-2},
}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Pearsonr(datasets.Metric):
"""simple docstring"""
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float'),
'references': datasets.Value('float'),
}) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.pearsonr.html'] , )
    def _compute(self, predictions, references, return_pvalue=False):
'''simple docstring'''
if return_pvalue:
            results = pearsonr(references, predictions)
return {"pearsonr": results[0], "p-value": results[1]}
else:
return {"pearsonr": float(pearsonr(lowercase , lowercase)[0])}
| 392 | 0 |
import warnings
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MVP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""RUCAIBox/mvp""": """https://huggingface.co/RUCAIBox/mvp/resolve/main/config.json""",
}
class MvpConfig(PretrainedConfig):
    model_type = "mvp"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}

    def __init__(
        self,
        vocab_size=50267,
        max_position_embeddings=1024,
        encoder_layers=12,
        encoder_ffn_dim=4096,
        encoder_attention_heads=16,
        decoder_layers=12,
        decoder_ffn_dim=4096,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        activation_function="gelu",
        d_model=1024,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        classifier_dropout=0.0,
        scale_embedding=False,
        use_cache=True,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        is_encoder_decoder=True,
        decoder_start_token_id=2,
        forced_eos_token_id=2,
        use_prompt=False,
        prompt_length=100,
        prompt_mid_dim=800,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.classifier_dropout = classifier_dropout
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.use_prompt = use_prompt
        self.prompt_length = prompt_length
        self.prompt_mid_dim = prompt_mid_dim

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )

        if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated", False):
            self.forced_bos_token_id = self.bos_token_id
            warnings.warn(
                f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
                "The config can simply be saved and uploaded again to be fixed."
            )
 | 423 |
from typing import Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import get_image_size, pad, rescale, to_channel_dimension_format
from ...image_utils import ChannelDimension, ImageInput, make_list_of_images, to_numpy_array, valid_images
from ...utils import TensorType, logging
lowercase : int = logging.get_logger(__name__)
class Swin2SRImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_pad: bool = True,
        pad_size: int = 8,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
        self.pad_size = pad_size

    def rescale(
        self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def pad(self, image: np.ndarray, size: int, data_format: Optional[Union[str, ChannelDimension]] = None):
        old_height, old_width = get_image_size(image)
        pad_height = (old_height // size + 1) * size - old_height
        pad_width = (old_width // size + 1) * size - old_width

        return pad(image, ((0, pad_height), (0, pad_width)), mode="symmetric", data_format=data_format)
def lowerCAmelCase ( self : str , A_ : ImageInput , A_ : Optional[bool] = None , A_ : Optional[float] = None , A_ : Optional[bool] = None , A_ : Optional[int] = None , A_ : Optional[Union[str, TensorType]] = None , A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST , **A_ : str , ) -> Dict:
"""simple docstring"""
lowerCamelCase_: str = do_rescale if do_rescale is not None else self.do_rescale
lowerCamelCase_: Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCamelCase_: Union[str, Any] = do_pad if do_pad is not None else self.do_pad
lowerCamelCase_: List[Any] = pad_size if pad_size is not None else self.pad_size
lowerCamelCase_: Optional[Any] = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
# All transformations expect numpy arrays.
lowerCamelCase_: List[str] = [to_numpy_array(A_ ) for image in images]
if do_rescale:
lowerCamelCase_: Tuple = [self.rescale(image=A_ , scale=A_ ) for image in images]
if do_pad:
lowerCamelCase_: List[str] = [self.pad(A_ , size=A_ ) for image in images]
lowerCamelCase_: Any = [to_channel_dimension_format(A_ , A_ ) for image in images]
lowerCamelCase_: Optional[int] = {"""pixel_values""": images}
return BatchFeature(data=A_ , tensor_type=A_ )
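
# Standalone sketch of the pad arithmetic above (illustrative addition): each
# dimension is rounded up to the next multiple of `size`; note that an already
# divisible dimension still gains one full extra block of `size` pixels.
def _pad_math_demo(size: int = 8) -> None:
    image = np.zeros((21, 30, 3))  # height, width, channels
    old_height, old_width = image.shape[0], image.shape[1]
    pad_height = (old_height // size + 1) * size - old_height  # 3
    pad_width = (old_width // size + 1) * size - old_width  # 2
    padded = np.pad(image, ((0, pad_height), (0, pad_width), (0, 0)), mode="symmetric")
    assert padded.shape == (24, 32, 3)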
| 423 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import torch
class TorchFormatter(TensorFormatter[Mapping, "torch.Tensor", Mapping]):
    def __init__(self, features=None, **torch_tensor_kwargs):
        super().__init__(features=features)
        self.torch_tensor_kwargs = torch_tensor_kwargs
        import torch  # noqa import torch at initialization

    def _consolidate(self, column):
        import torch

        if isinstance(column, list) and column:
            if all(
                isinstance(x, torch.Tensor) and x.shape == column[0].shape and x.dtype == column[0].dtype
                for x in column
            ):
                return torch.stack(column)
        return column

    def _tensorize(self, value):
        import torch

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            default_dtype = {"dtype": torch.int64}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": torch.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        return torch.tensor(value, **{**default_dtype, **self.torch_tensor_kwargs})

    def _recursive_tensorize(self, data_struct):
        import torch

        # support for torch, tf, jax etc.
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, torch.Tensor):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # torch tensors cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "torch.Tensor":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
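
# Minimal sketch of the dtype-promotion rule in `_tensorize` above (illustrative,
# not the library's code): integer arrays become torch.int64 and floating arrays
# become torch.float32 unless the caller overrides the dtype.
def _promotion_demo() -> None:
    import torch

    arr = np.arange(3, dtype=np.int32)
    kwargs = {"dtype": torch.int64} if np.issubdtype(arr.dtype, np.integer) else {}
    assert torch.tensor(arr, **kwargs).dtype == torch.int64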
| 702 |
test_graph = [
    [0, 16, 13, 0, 0, 0],
    [0, 0, 10, 12, 0, 0],
    [0, 4, 0, 0, 14, 0],
    [0, 0, 9, 0, 0, 20],
    [0, 0, 0, 7, 0, 4],
    [0, 0, 0, 0, 0, 0],
]


def bfs(graph: list[list[int]], s: int, t: int, parent: list[int]) -> bool:
    """Breadth-first search over the residual graph, recording the path in `parent`."""
    visited = [False] * len(graph)
    queue = [s]
    visited[s] = True
    while queue:
        u = queue.pop(0)
        for ind in range(len(graph[u])):
            if visited[ind] is False and graph[u][ind] > 0:
                queue.append(ind)
                visited[ind] = True
                parent[ind] = u
    return visited[t]


def mincut(graph: list[list[int]], source: int, sink: int) -> list[tuple[int, int]]:
    """Ford-Fulkerson min cut: returns the edges saturated by the maximum flow."""
    parent = [-1] * len(graph)
    max_flow = 0
    res = []
    temp = [i[:] for i in graph]  # Record original capacities (copy).
    while bfs(graph, source, sink, parent):
        path_flow = float("Inf")
        s = sink
        while s != source:
            # Find the minimum residual capacity along the augmenting path
            path_flow = min(path_flow, graph[parent[s]][s])
            s = parent[s]
        max_flow += path_flow
        # Update residual capacities along the path
        v = sink
        while v != source:
            u = parent[v]
            graph[u][v] -= path_flow
            graph[v][u] += path_flow
            v = parent[v]
    # Edges whose capacity was fully used form the cut
    for i in range(len(graph)):
        for j in range(len(graph[0])):
            if graph[i][j] == 0 and temp[i][j] > 0:
                res.append((i, j))
    return res
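
# Illustrative check (added sketch, not part of the original algorithm): every
# returned cut edge must have carried positive capacity in the original graph.
# For the classic capacities in `test_graph` the maximum flow value is 23.
def _check_mincut() -> None:
    original = [row[:] for row in test_graph]
    cut = mincut([row[:] for row in test_graph], source=0, sink=5)
    assert all(original[u][v] > 0 for u, v in cut)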
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
| 495 | 0 |
def one_pence() -> int:
    return 1


def two_pence(x: int) -> int:
    return 0 if x < 0 else two_pence(x - 2) + one_pence()


def five_pence(x: int) -> int:
    return 0 if x < 0 else five_pence(x - 5) + two_pence(x)


def ten_pence(x: int) -> int:
    return 0 if x < 0 else ten_pence(x - 10) + five_pence(x)


def twenty_pence(x: int) -> int:
    return 0 if x < 0 else twenty_pence(x - 20) + ten_pence(x)


def fifty_pence(x: int) -> int:
    return 0 if x < 0 else fifty_pence(x - 50) + twenty_pence(x)


def one_pound(x: int) -> int:
    return 0 if x < 0 else one_pound(x - 100) + fifty_pence(x)


def two_pound(x: int) -> int:
    return 0 if x < 0 else two_pound(x - 200) + one_pound(x)


def solution(x: int = 200) -> int:
    return two_pound(x)
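
# Sanity checks (illustrative addition): solution(200) counts the ways to make
# GBP 2.00 from the eight coin values, the well-known Project Euler #31 answer.
def _check_solution() -> None:
    assert solution(2) == 2  # one 2p coin, or two 1p coins
    assert solution(200) == 73682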
if __name__ == "__main__":
print(solution(int(input().strip())))
| 651 |
from __future__ import annotations


def depth_first_search(
    possible_board: list[int],
    diagonal_right_collisions: list[int],
    diagonal_left_collisions: list[int],
    boards: list[list[str]],
    n: int,
) -> None:
    # Get the next row in the current board (possible_board) to fill with a queen
    row = len(possible_board)

    # If row equals the size of the board, every row of the current board
    # (possible_board) already holds a queen, so we record the solution
    if row == n:
        # We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
        # this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
        boards.append([". " * i + "Q " + ". " * (n - 1 - i) for i in possible_board])
        return

    # We iterate over each column in the row to find all possible results
    for col in range(n):
        # First we check that the current board (possible_board) does not already
        # contain this column value; if it does, two queens collide vertically.
        # Then we apply the two diagonal formulas:
        #
        # 45º: y - x = b or 45: row - col = b
        # 135º: y + x = b or row + col = b.
        #
        # and verify that neither result already exists in its respective set
        # (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True there is a collision, so we continue
        # to the next value in the for loop.
        if (
            col in possible_board
            or row - col in diagonal_right_collisions
            or row + col in diagonal_left_collisions
        ):
            continue

        # Otherwise we recurse with the updated inputs
        depth_first_search(
            [*possible_board, col],
            [*diagonal_right_collisions, row - col],
            [*diagonal_left_collisions, row + col],
            boards,
            n,
        )


def n_queens_solution(n: int) -> None:
    boards: list[list[str]] = []
    depth_first_search([], [], [], boards, n)

    # Print all the boards
    for board in boards:
        for column in board:
            print(column)
        print("")

    print(len(boards), "solutions were found.")
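
# Quick illustration of the diagonal identifiers used above (added for clarity):
# (0, 1) and (1, 2) share a 45º diagonal since row - col matches, while (0, 1)
# and (1, 0) share a 135º diagonal since row + col matches.
assert 0 - 1 == 1 - 2 and 0 + 1 == 1 + 0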
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 651 | 1 |
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path

import torch_xla.distributed.xla_multiprocessing as xmp


def parse_args():
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"
        )
    )
    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")
    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )
    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)
    return parser.parse_args()


def main():
    args = parse_args()
    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)
    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]
    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)
if __name__ == "__main__":
main()
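
# Hypothetical invocation (script name and flags are placeholders); the
# launched script must expose an `_mp_fn(index)` entry point for xmp.spawn:
#   python xla_spawn.py --num_cores 8 train.py --learning_rate 3e-5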
| 230 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class A_ ( __UpperCamelCase ):
'''simple docstring'''
__snake_case = """naver-clova-ix/donut-base-finetuned-docvqa"""
__snake_case = (
"""This is a tool that answers a question about an document (pdf). It takes an input named `document` which """
"""should be the document containing the information, as well as a `question` that is the question about the """
"""document. It returns a text that contains the answer to the question."""
)
__snake_case = """document_qa"""
__snake_case = AutoProcessor
__snake_case = VisionEncoderDecoderModel
__snake_case = ["""image""", """text"""]
__snake_case = ["""text"""]
def __init__( self: Dict , *a: List[Any] , **a: List[Any] ):
if not is_vision_available():
raise ValueError('Pillow must be installed to use the DocumentQuestionAnsweringTool.' )
super().__init__(*a , **a )
def _snake_case ( self: str , a: "Image" , a: str ):
__lowerCamelCase : str = '<s_docvqa><s_question>{user_input}</s_question><s_answer>'
__lowerCamelCase : Dict = task_prompt.replace('{user_input}' , a )
__lowerCamelCase : Optional[Any] = self.pre_processor.tokenizer(
a , add_special_tokens=a , return_tensors='pt' ).input_ids
__lowerCamelCase : Union[str, Any] = self.pre_processor(a , return_tensors='pt' ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def _snake_case ( self: Optional[Any] , a: Tuple ):
return self.model.generate(
inputs['pixel_values'].to(self.device ) , decoder_input_ids=inputs['decoder_input_ids'].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=a , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=a , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=a , ).sequences
def _snake_case ( self: Optional[Any] , a: Any ):
__lowerCamelCase : Union[str, Any] = self.pre_processor.batch_decode(a )[0]
__lowerCamelCase : List[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , '' )
__lowerCamelCase : Optional[int] = sequence.replace(self.pre_processor.tokenizer.pad_token , '' )
__lowerCamelCase : Optional[int] = re.sub(R'<.*?>' , '' , a , count=1 ).strip() # remove first task start token
        __lowerCamelCase : int = self.pre_processor.token2json(a )
return sequence["answer"]
| 230 | 1 |
"""Binary search helpers: bisect variants plus iterative and recursive lookups."""
from __future__ import annotations

import bisect


def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost index at which `item` can be inserted while keeping order."""
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of `item` or None."""
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Same lookup using the standard-library bisect module."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 251 |
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
lowerCAmelCase = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
def _UpperCamelCase ( self , a , a , a ) -> Union[str, Any]:
snake_case_ = AudioClassificationPipeline(model=a , feature_extractor=a )
# test with a raw waveform
snake_case_ = np.zeros((3_40_00,) )
snake_case_ = np.zeros((1_40_00,) )
return audio_classifier, [audioa, audio]
def _UpperCamelCase ( self , a , a ) -> Tuple:
snake_case_ , snake_case_ = examples
snake_case_ = audio_classifier(a )
# by default a model is initialized with num_labels=2
self.assertEqual(
a , [
{'score': ANY(a ), 'label': ANY(a )},
{'score': ANY(a ), 'label': ANY(a )},
] , )
snake_case_ = audio_classifier(a , top_k=1 )
self.assertEqual(
a , [
{'score': ANY(a ), 'label': ANY(a )},
] , )
self.run_torchaudio(a )
@require_torchaudio
def _UpperCamelCase ( self , a ) -> List[str]:
import datasets
# test with a local file
snake_case_ = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
snake_case_ = dataset[0]['audio']['array']
snake_case_ = audio_classifier(a )
self.assertEqual(
a , [
{'score': ANY(a ), 'label': ANY(a )},
{'score': ANY(a ), 'label': ANY(a )},
] , )
@require_torch
def _UpperCamelCase ( self ) -> Dict:
snake_case_ = 'anton-l/wav2vec2-random-tiny-classifier'
snake_case_ = pipeline('audio-classification' , model=a )
snake_case_ = np.ones((80_00,) )
snake_case_ = audio_classifier(a , top_k=4 )
snake_case_ = [
{'score': 0.0_842, 'label': 'no'},
{'score': 0.0_838, 'label': 'up'},
{'score': 0.0_837, 'label': 'go'},
{'score': 0.0_834, 'label': 'right'},
]
snake_case_ = [
{'score': 0.0_845, 'label': 'stop'},
{'score': 0.0_844, 'label': 'on'},
{'score': 0.0_841, 'label': 'right'},
{'score': 0.0_834, 'label': 'left'},
]
self.assertIn(nested_simplify(a , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
snake_case_ = {'array': np.ones((80_00,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
snake_case_ = audio_classifier(a , top_k=4 )
self.assertIn(nested_simplify(a , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
@require_torch
@slow
def _UpperCamelCase ( self ) -> str:
import datasets
snake_case_ = 'superb/wav2vec2-base-superb-ks'
snake_case_ = pipeline('audio-classification' , model=a )
snake_case_ = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
        snake_case_ = np.array(dataset[3]['speech'] , dtype=np.float32 )
snake_case_ = audio_classifier(a , top_k=4 )
self.assertEqual(
nested_simplify(a , decimals=3 ) , [
{'score': 0.981, 'label': 'go'},
{'score': 0.007, 'label': 'up'},
{'score': 0.006, 'label': '_unknown_'},
{'score': 0.001, 'label': 'down'},
] , )
@require_tf
@unittest.skip('Audio classification is not implemented for TF' )
def _UpperCamelCase ( self ) -> Optional[int]:
pass
| 198 | 0 |
import pytest
from datasets.utils.sharding import _distribute_shards, _number_of_shards_in_gen_kwargs, _split_gen_kwargs
@pytest.mark.parametrize(
'''kwargs, expected''' , [
({'''num_shards''': 0, '''max_num_jobs''': 1}, []),
({'''num_shards''': 10, '''max_num_jobs''': 1}, [range(10 )]),
        ({'''num_shards''': 10, '''max_num_jobs''': 10}, [range(i , i + 1 ) for i in range(10 )]),
({'''num_shards''': 1, '''max_num_jobs''': 10}, [range(1 )]),
({'''num_shards''': 10, '''max_num_jobs''': 3}, [range(0 , 4 ), range(4 , 7 ), range(7 , 10 )]),
({'''num_shards''': 3, '''max_num_jobs''': 10}, [range(0 , 1 ), range(1 , 2 ), range(2 , 3 )]),
] , )
def test_distribute_shards(kwargs, expected):
    out = _distribute_shards(**kwargs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, max_num_jobs, expected''' , [
({'''foo''': 0}, 10, [{'''foo''': 0}]),
({'''shards''': [0, 1, 2, 3]}, 1, [{'''shards''': [0, 1, 2, 3]}]),
({'''shards''': [0, 1, 2, 3]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}, {'''shards''': [2]}, {'''shards''': [3]}]),
({'''shards''': [0, 1]}, 4, [{'''shards''': [0]}, {'''shards''': [1]}]),
({'''shards''': [0, 1, 2, 3]}, 2, [{'''shards''': [0, 1]}, {'''shards''': [2, 3]}]),
] , )
def test_split_gen_kwargs(gen_kwargs, max_num_jobs, expected):
    out = _split_gen_kwargs(gen_kwargs, max_num_jobs)
    assert out == expected
@pytest.mark.parametrize(
'''gen_kwargs, expected''' , [
({'''foo''': 0}, 1),
({'''shards''': [0]}, 1),
({'''shards''': [0, 1, 2, 3]}, 4),
({'''shards''': [0, 1, 2, 3], '''foo''': 0}, 4),
({'''shards''': [0, 1, 2, 3], '''other''': (0, 1)}, 4),
({'''shards''': [0, 1, 2, 3], '''shards2''': [0, 1]}, RuntimeError),
] , )
def test_number_of_shards_in_gen_kwargs(gen_kwargs, expected):
    if expected is RuntimeError:
        with pytest.raises(expected):
            _number_of_shards_in_gen_kwargs(gen_kwargs)
    else:
        out = _number_of_shards_in_gen_kwargs(gen_kwargs)
        assert out == expected
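
# For intuition, a rough reimplementation of the contiguous-split behaviour the
# parametrized cases above encode (a sketch, not the library's implementation):
def _distribute_shards_sketch(num_shards: int, max_num_jobs: int) -> list[range]:
    num_jobs = min(num_shards, max_num_jobs)
    out, start = [], 0
    for i in range(num_jobs):
        size = num_shards // num_jobs + (1 if i < num_shards % num_jobs else 0)
        out.append(range(start, start + size))
        start += size
    return out


assert _distribute_shards_sketch(10, 3) == [range(0, 4), range(4, 7), range(7, 10)]
assert _distribute_shards_sketch(1, 10) == [range(0, 1)]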
| 563 |
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act="gelu_new",
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std

        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.num_hidden_layers = self.num_hidden_layers + 2
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))
    def create_and_check_base_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelBaseModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))
    def create_and_check_for_pretraining(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForPreTraining(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
            "fill-mask": TFFunnelForMaskedLM,
            "question-answering": TFFunnelForQuestionAnswering,
            "text-classification": TFFunnelForSequenceClassification,
            "token-classification": TFFunnelForTokenClassification,
            "zero-shot": TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
| 563 | 1 |
import json
import os
import subprocess
import unittest
from ast import literal_eval
import pytest
from parameterized import parameterized, parameterized_class
from . import is_sagemaker_available
if is_sagemaker_available():
from sagemaker import Session, TrainingJobAnalytics
from sagemaker.huggingface import HuggingFace
@pytest.mark.skipif(
literal_eval(os.getenv('''TEST_SAGEMAKER''' , '''False''' ) ) is not True , reason='''Skipping test because should only be run when releasing minor transformers version''' , )
@pytest.mark.usefixtures('''sm_env''' )
@parameterized_class(
[
{
'''framework''': '''pytorch''',
'''script''': '''run_glue_model_parallelism.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
{
'''framework''': '''pytorch''',
'''script''': '''run_glue.py''',
'''model_name_or_path''': '''roberta-large''',
'''instance_type''': '''ml.p3dn.24xlarge''',
'''results''': {'''train_runtime''': 1_6_0_0, '''eval_accuracy''': 0.3, '''eval_loss''': 1.2},
},
] )
class A_ ( unittest.TestCase ):
"""simple docstring"""
    def setUp(self):
        if self.framework == "pytorch":
            subprocess.run(
                f"cp ./examples/pytorch/text-classification/run_glue.py {self.env.test_path}/run_glue.py".split(),
                encoding="utf-8",
                check=True,
            )
        assert hasattr(self, "env")
    def create_estimator(self, instance_count):
        # configuration for running training on smdistributed Model Parallel
        smp_options = {
            "enabled": True,
            "processes_per_host": 8,
        }
        mpi_options = {
            "enabled": True,
            "parameters": {
                "microbatches": 4,
                "placement_strategy": "spread",
                "pipeline": "interleaved",
                "optimize": "speed",
                "partitions": 4,
                "ddp": True,
            },
        }
        distribution = {"smdistributed": {"modelparallel": smp_options}, "mpi": mpi_options}
        name_extension = "trainer" if self.script == "run_glue.py" else "smtrainer"
        # creates estimator
        return HuggingFace(
            entry_point=self.script,
            source_dir=self.env.test_path,
            role=self.env.role,
            image_uri=self.env.image_uri,
            base_job_name=f"{self.env.base_job_name}-{instance_count}-smp-{name_extension}",
            instance_count=instance_count,
            instance_type=self.instance_type,
            debugger_hook_config=False,
            hyperparameters={
                **self.env.hyperparameters,
                "model_name_or_path": self.model_name_or_path,
                "max_steps": 500,
            },
            metric_definitions=self.env.metric_definitions,
            distribution=distribution,
            py_version="py36",
        )
    def save_results_as_csv(self, job_name):
        TrainingJobAnalytics(job_name).export_csv(f"{self.env.test_path}/{job_name}_metrics.csv")
@parameterized.expand([(1,)] )
    def test_script(self, instance_count):
        # create estimator
        estimator = self.create_estimator(instance_count)
        # run training
        estimator.fit()
        # result dataframe
        result_metrics_df = TrainingJobAnalytics(estimator.latest_training_job.name).dataframe()
        # extract kpis
        eval_accuracy = list(result_metrics_df[result_metrics_df.metric_name == "eval_accuracy"]["value"])
        eval_loss = list(result_metrics_df[result_metrics_df.metric_name == "eval_loss"]["value"])
        # get train time from SageMaker job, this includes starting, preprocessing, stopping
        train_runtime = (
            Session().describe_training_job(estimator.latest_training_job.name).get("TrainingTimeInSeconds", 999999)
        )
# assert kpis
assert train_runtime <= self.results["train_runtime"]
assert all(t >= self.results['eval_accuracy'] for t in eval_accuracy )
assert all(t <= self.results['eval_loss'] for t in eval_loss )
# dump tests result into json file to share in PR
with open(F"""{estimator.latest_training_job.name}.json""" ,'w' ) as outfile:
            json.dump({"train_time": train_runtime, "eval_accuracy": eval_accuracy, "eval_loss": eval_loss}, outfile)
| 67 |
'''simple docstring'''
import argparse
import os
import jax as jnp
import numpy as onp
import torch
import torch.nn as nn
from music_spectrogram_diffusion import inference
from t5x import checkpoints
from diffusers import DDPMScheduler, OnnxRuntimeModel, SpectrogramDiffusionPipeline
from diffusers.pipelines.spectrogram_diffusion import SpectrogramContEncoder, SpectrogramNotesEncoder, TaFilmDecoder
MODEL = 'base_with_context'
def load_notes_encoder( weights , model ):
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['token_embedder']['embedding'] ) )
snake_case: Tuple =nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
snake_case: str =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
        attention_weights = ly_weight['attention']
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: List[Any] =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_continuous_encoder( weights , model ):
"""simple docstring"""
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(weights['input_proj']['kernel'].T ) )
snake_case: Dict =nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
for lyr_num, lyr in enumerate(model.encoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
        attention_weights = ly_weight['attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(
torch.FloatTensor(ly_weight['pre_attention_layer_norm']['scale'] ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Any =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(weights['encoder_norm']['scale'] ) )
return model
def load_decoder( weights , model ):
"""simple docstring"""
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['time_emb_dense0']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(weights['time_emb_dense1']['kernel'].T ) )
snake_case: Tuple =nn.Parameter(
        torch.FloatTensor(weights['Embed_0']['embedding'] ) , requires_grad=False )
snake_case: Any =nn.Parameter(
torch.FloatTensor(weights['continuous_inputs_projection']['kernel'].T ) )
for lyr_num, lyr in enumerate(model.decoders ):
        ly_weight = weights[f'''layers_{lyr_num}''']
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_self_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_0']['DenseGeneral_0']['kernel'].T ) )
        attention_weights = ly_weight['self_attention']
snake_case: str =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
        attention_weights = ly_weight['MultiHeadDotProductAttention_0']
snake_case: int =nn.Parameter(torch.FloatTensor(attention_weights['query']['kernel'].T ) )
snake_case: List[str] =nn.Parameter(torch.FloatTensor(attention_weights['key']['kernel'].T ) )
snake_case: Dict =nn.Parameter(torch.FloatTensor(attention_weights['value']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(attention_weights['out']['kernel'].T ) )
snake_case: Any =nn.Parameter(
torch.FloatTensor(ly_weight['pre_cross_attention_layer_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['pre_mlp_layer_norm']['scale'] ) )
snake_case: Union[str, Any] =nn.Parameter(
torch.FloatTensor(ly_weight['FiLMLayer_1']['DenseGeneral_0']['kernel'].T ) )
snake_case: int =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_0']['kernel'].T ) )
snake_case: Optional[int] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wi_1']['kernel'].T ) )
snake_case: Union[str, Any] =nn.Parameter(torch.FloatTensor(ly_weight['mlp']['wo']['kernel'].T ) )
snake_case: Optional[Any] =nn.Parameter(torch.FloatTensor(weights['decoder_norm']['scale'] ) )
snake_case: int =nn.Parameter(torch.FloatTensor(weights['spec_out_dense']['kernel'].T ) )
return model
def main( args ):
"""simple docstring"""
    ta_checkpoint = checkpoints.load_t5x_checkpoint(args.checkpoint_path )
    ta_checkpoint = jnp.tree_util.tree_map(onp.array , ta_checkpoint )
    gin_overrides = [
        'from __gin__ import dynamic_registration',
        'from music_spectrogram_diffusion.models.diffusion import diffusion_utils',
        'diffusion_utils.ClassifierFreeGuidanceConfig.eval_condition_weight = 2.0',
        'diffusion_utils.DiffusionConfig.classifier_free_guidance = @diffusion_utils.ClassifierFreeGuidanceConfig()',
    ]
    gin_file = os.path.join(args.checkpoint_path , '..' , 'config.gin' )
    gin_config = inference.parse_training_gin_file(gin_file , gin_overrides )
    synth_model = inference.InferenceModel(args.checkpoint_path , gin_config )
    scheduler = DDPMScheduler(beta_schedule='squaredcos_cap_v2' , variance_type='fixed_large' )
    notes_encoder = SpectrogramNotesEncoder(
max_length=synth_model.sequence_length['inputs'] , vocab_size=synth_model.model.module.config.vocab_size , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    continuous_encoder = SpectrogramContEncoder(
input_dims=synth_model.audio_codec.n_dims , targets_context_length=synth_model.sequence_length['targets_context'] , d_model=synth_model.model.module.config.emb_dim , dropout_rate=synth_model.model.module.config.dropout_rate , num_layers=synth_model.model.module.config.num_encoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , feed_forward_proj='gated-gelu' , )
    decoder = TaFilmDecoder(
input_dims=synth_model.audio_codec.n_dims , targets_length=synth_model.sequence_length['targets_context'] , max_decoder_noise_time=synth_model.model.module.config.max_decoder_noise_time , d_model=synth_model.model.module.config.emb_dim , num_layers=synth_model.model.module.config.num_decoder_layers , num_heads=synth_model.model.module.config.num_heads , d_kv=synth_model.model.module.config.head_dim , d_ff=synth_model.model.module.config.mlp_dim , dropout_rate=synth_model.model.module.config.dropout_rate , )
    notes_encoder = load_notes_encoder(ta_checkpoint['target']['token_encoder'] , notes_encoder )
    continuous_encoder = load_continuous_encoder(ta_checkpoint['target']['continuous_encoder'] , continuous_encoder )
    decoder = load_decoder(ta_checkpoint['target']['decoder'] , decoder )
    melgan = OnnxRuntimeModel.from_pretrained('kashif/soundstream_mel_decoder' )
    pipe = SpectrogramDiffusionPipeline(
        notes_encoder=notes_encoder , continuous_encoder=continuous_encoder , decoder=decoder , scheduler=scheduler , melgan=melgan , )
if args.save:
pipe.save_pretrained(args.output_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--output_path', default=None, type=str, required=True, help='Path to the converted model.')
parser.add_argument(
'--save', default=True, type=bool, required=False, help='Whether to save the converted model or not.'
)
parser.add_argument(
'--checkpoint_path',
default=F"""{MODEL}/checkpoint_500000""",
type=str,
required=False,
help='Path to the original jax model checkpoint.',
)
    args = parser.parse_args()
main(args)
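
# Hypothetical invocation of this conversion script (paths are placeholders):
#   python convert_music_spectrogram_to_diffusers.py \
#       --checkpoint_path base_with_context/checkpoint_500000 \
#       --output_path ./spectrogram_diffusion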
| 350 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
snake_case__ : Any = logging.get_logger(__name__)
snake_case__ : Dict = {
'tiiuae/falcon-40b': 'https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json',
'tiiuae/falcon-7b': 'https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json',
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
| 171 |
import qiskit


def quantum_entanglement(qubits: int = 2) -> qiskit.result.counts.Counts:
    classical_bits = qubits
    # Using Aer's simulator
    simulator = qiskit.Aer.get_backend("aer_simulator")
    # Creating a Quantum Circuit acting on the q register
    circuit = qiskit.QuantumCircuit(qubits, classical_bits)
    # Adding a H gate on qubit 0 (now q0 is in superposition)
    circuit.h(0)
    for i in range(1, qubits):
        # Adding CX (CNOT) gates to entangle each qubit with the previous one
        circuit.cx(i - 1, i)
    # Mapping the quantum measurement to the classical bits
    circuit.measure(list(range(qubits)), list(range(qubits)))
    # Now measuring any one qubit collapses the superposition, so every qubit
    # is observed in the same state as the measured one.
    # Executing the circuit on the simulator
    job = qiskit.execute(circuit, simulator, shots=1000)
    return job.result().get_counts(circuit)
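
# On an ideal, noiseless simulator a GHZ state only ever collapses to the
# all-zeros or all-ones bitstring; a quick check (illustrative addition):
def _check_ghz(qubits: int = 3) -> None:
    counts = quantum_entanglement(qubits)
    assert set(counts) <= {"0" * qubits, "1" * qubits}
    assert sum(counts.values()) == 1000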
if __name__ == "__main__":
print(F'Total count for various states are: {quantum_entanglement(3)}')
| 171 | 1 |
def solution(n: int = 1000) -> int:
    """Return the sum of all multiples of 3 or 5 below `n` (Project Euler #1)."""
    return sum(e for e in range(3, n) if e % 3 == 0 or e % 5 == 0)
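
# Cross-check against the inclusion-exclusion closed form (illustrative addition):
def _closed_form(n: int = 1000) -> int:
    def tri(k: int) -> int:
        m = (n - 1) // k
        return k * m * (m + 1) // 2

    return tri(3) + tri(5) - tri(15)


assert _closed_form(1000) == solution(1000) == 233168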
if __name__ == "__main__":
print(f"""{solution() = }""") | 271 | import unittest
import numpy as np
import timeout_decorator # noqa
from transformers import BlenderbotConfig, is_flax_available
from transformers.testing_utils import jax_device, require_flax, slow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
UpperCAmelCase_ = '''platform'''
import jax
import jax.numpy as jnp
from transformers import BlenderbotTokenizer
from transformers.models.blenderbot.modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
shift_tokens_right,
)
def prepare_blenderbot_inputs_dict(
    config,
    input_ids,
    decoder_input_ids=None,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = np.where(input_ids != config.pad_token_id, 1, 0)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.where(decoder_input_ids != config.pad_token_id, 1, 0)
    if head_mask is None:
        head_mask = np.ones((config.encoder_layers, config.encoder_attention_heads))
    if decoder_head_mask is None:
        decoder_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    if cross_attn_head_mask is None:
        cross_attn_head_mask = np.ones((config.decoder_layers, config.decoder_attention_heads))
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
class FlaxBlenderbotModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=32,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.initializer_range = initializer_range
    def prepare_config_and_inputs(self):
        input_ids = np.clip(ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size), 3, self.vocab_size)
        input_ids = np.concatenate((input_ids, 2 * np.ones((self.batch_size, 1), dtype=np.int64)), -1)
        decoder_input_ids = shift_tokens_right(input_ids, 1, 2)

        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            encoder_layers=self.num_hidden_layers,
            decoder_layers=self.num_hidden_layers,
            encoder_attention_heads=self.num_attention_heads,
            decoder_attention_heads=self.num_attention_heads,
            encoder_ffn_dim=self.intermediate_size,
            decoder_ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            initializer_range=self.initializer_range,
            use_cache=False,  # assumed value; the original literal was lost in extraction
        )
        inputs_dict = prepare_blenderbot_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask,
            past_key_values=outputs_cache.past_key_values,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ],
            axis=-1,
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
            (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
        )

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1],
            encoder_outputs,
            decoder_attention_mask=decoder_attention_mask_cache,
            past_key_values=past_key_values,
            decoder_position_ids=decoder_position_ids,
        )
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:],
            encoder_outputs,
            past_key_values=outputs_cache.past_key_values,
            decoder_attention_mask=decoder_attention_mask_cache,
            decoder_position_ids=decoder_position_ids,
        )

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class BlenderbotHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        input_ids = np.array(
[
[71, 82, 18, 33, 46, 91, 2],
[68, 34, 26, 58, 30, 82, 2],
[5, 97, 17, 39, 94, 40, 2],
[76, 83, 94, 25, 70, 78, 2],
[87, 59, 41, 35, 48, 66, 2],
[55, 13, 16, 58, 5, 2, 1], # note padding
[64, 27, 31, 51, 12, 75, 2],
[52, 64, 86, 17, 83, 39, 2],
[48, 61, 9, 24, 71, 82, 2],
[26, 1, 60, 48, 22, 13, 2],
[21, 5, 62, 28, 14, 76, 2],
[45, 98, 37, 86, 59, 48, 2],
[70, 70, 50, 9, 28, 0, 2],
            ],
            dtype=np.int64,
        )

        batch_size = input_ids.shape[0]
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=24,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=32,
            decoder_ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
    def test_lm_forward(self):
        config, input_ids, batch_size = self._get_config_and_data()
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        outputs = lm_model(input_ids=input_ids)
        expected_shape = (batch_size, input_ids.shape[1], config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_lm_uneven_forward(self):
        config = BlenderbotConfig(
            vocab_size=self.vocab_size,
            d_model=14,
            encoder_layers=2,
            decoder_layers=2,
            encoder_attention_heads=2,
            decoder_attention_heads=2,
            encoder_ffn_dim=8,
            decoder_ffn_dim=8,
            max_position_embeddings=48,
        )
        lm_model = FlaxBlenderbotForConditionalGeneration(config)
        context = np.array([[71, 82, 18, 33, 46, 91, 2], [68, 34, 26, 58, 30, 2, 1]], dtype=np.int64)
        summary = np.array([[82, 71, 82, 18, 2], [58, 68, 2, 1, 1]], dtype=np.int64)
        outputs = lm_model(input_ids=context, decoder_input_ids=summary)
        expected_shape = (*summary.shape, config.vocab_size)
        self.assertEqual(outputs["logits"].shape, expected_shape)
    def test_shift_tokens_right(self):
        input_ids = np.array([[71, 82, 18, 33, 2, 1, 1], [68, 34, 26, 58, 30, 82, 2]], dtype=np.int64)
        shifted = shift_tokens_right(input_ids, 1, 2)
        n_pad_before = np.equal(input_ids, 1).astype(np.float32).sum()
        n_pad_after = np.equal(shifted, 1).astype(np.float32).sum()
        self.assertEqual(shifted.shape, input_ids.shape)
        self.assertEqual(n_pad_after, n_pad_before - 1)
        self.assertTrue(np.equal(shifted[:, 0], 2).all())
@require_flax
class FlaxBlenderbotModelTest(FlaxModelTesterMixin, unittest.TestCase, FlaxGenerationTesterMixin):
    is_encoder_decoder = True
    all_model_classes = (
        (
            FlaxBlenderbotModel,
            FlaxBlenderbotForConditionalGeneration,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxBlenderbotForConditionalGeneration,) if is_flax_available() else ()
    def setUp(self):
        self.model_tester = FlaxBlenderbotModelTester(self)
    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)
    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("facebook/blenderbot-400M-distill")
            # FlaxBlenderbotForSequenceClassification expects eos token in input_ids
            input_ids = np.ones((1, 1)) * model.config.eos_token_id
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)
    @unittest.skipUnless(jax_device != "cpu", "3B test too slow on CPU.")
    @slow
    def test_generation_from_short_input_same_as_parlai_3B(self):
        FASTER_GEN_KWARGS = {"num_beams": 1, "early_stopping": True, "min_length": 15, "max_length": 25}
        TOK_DECODE_KW = {"skip_special_tokens": True, "clean_up_tokenization_spaces": True}

        model = FlaxBlenderbotForConditionalGeneration.from_pretrained("facebook/blenderbot-3B", from_pt=True)
        tokenizer = BlenderbotTokenizer.from_pretrained("facebook/blenderbot-3B")

        src_text = ["Sam"]
        model_inputs = tokenizer(src_text, return_tensors="jax")

        generated_ids = model.generate(**model_inputs, **FASTER_GEN_KWARGS)
        tgt_text = 'Sam is a great name. It means "sun" in Gaelic.'

        generated_txt = tokenizer.batch_decode(generated_ids.sequences, **TOK_DECODE_KW)
        assert generated_txt[0].strip() == tgt_text | 271 | 1 |
def dodecahedron_surface_area(edge: float) -> float:
    """Return the surface area of a regular dodecahedron: 3 * sqrt(25 + 10 * sqrt(5)) * edge^2."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return 3 * ((25 + 10 * (5 ** (1 / 2))) ** (1 / 2)) * (edge**2)


def dodecahedron_volume(edge: float) -> float:
    """Return the volume of a regular dodecahedron: ((15 + 7 * sqrt(5)) / 4) * edge^3."""
    if not isinstance(edge, (int, float)) or edge <= 0:
        raise ValueError("Length must be a positive number.")
    return ((15 + (7 * (5 ** (1 / 2)))) / 4) * (edge**3)
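
# Worked example (added for illustration; not in the original module). For a
# unit edge the closed forms give 3*sqrt(25 + 10*sqrt(5)) ~= 20.6457 for the
# surface area and (15 + 7*sqrt(5))/4 ~= 7.6631 for the volume:
#
#     >>> round(dodecahedron_surface_area(1), 4)
#     20.6457
#     >>> round(dodecahedron_volume(1), 4)
#     7.6631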
if __name__ == "__main__":
import doctest
doctest.testmod()
| 35 |
def bin_to_octal(bin_string: str) -> str:
    if not all(char in "01" for char in bin_string):
        raise ValueError("Non-binary value was passed to the function")
    if not bin_string:
        raise ValueError("Empty string was passed to the function")
    oct_string = ""
    while len(bin_string) % 3 != 0:
        bin_string = "0" + bin_string
    bin_string_in_3_list = [
        bin_string[index : index + 3]
        for index in range(len(bin_string))
        if index % 3 == 0
    ]
    for bin_group in bin_string_in_3_list:
        oct_val = 0
        for index, val in enumerate(bin_group):
            oct_val += int(2 ** (2 - index) * int(val))
        oct_string += str(oct_val)
    return oct_string
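
# Quick sanity check (illustrative addition, not part of the original module):
# "111100" already has a length divisible by 3, so it splits into "111" and
# "100", which map to octal digits 7 and 4 respectively:
#
#     >>> bin_to_octal("111100")
#     '74'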
if __name__ == "__main__":
from doctest import testmod
testmod()
| 35 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)

        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )

        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")

        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)

        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
    @require_tf
    @unittest.skip("Object detection not implemented in TF")
    def test_small_model_tf(self):
        pass
    @require_torch
    def test_small_model_pt(self):
        model_id = "hf-internal-testing/tiny-detr-mobilenetsv3"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.0)

        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] , threshold=0.0 , )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
[
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
{'''score''': 0.33_76, '''label''': '''LABEL_0''', '''box''': {'''xmin''': 1_59, '''ymin''': 1_20, '''xmax''': 4_80, '''ymax''': 3_59}},
],
] , )
    @require_torch
    @slow
    def test_large_model_pt(self):
        model_id = "facebook/detr-resnet-50"

        model = AutoModelForObjectDetection.from_pretrained(model_id)
        feature_extractor = AutoFeatureExtractor.from_pretrained(model_id)
        object_detector = ObjectDetectionPipeline(model=model, feature_extractor=feature_extractor)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
    @require_torch
    @slow
    def test_integration_torch_object_detection(self):
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg")
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
        outputs = object_detector(
[
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
'''http://images.cocodataset.org/val2017/000000039769.jpg''',
] )
self.assertEqual(
            nested_simplify(outputs, decimals=4), [
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
[
{'''score''': 0.99_82, '''label''': '''remote''', '''box''': {'''xmin''': 40, '''ymin''': 70, '''xmax''': 1_75, '''ymax''': 1_17}},
{'''score''': 0.99_60, '''label''': '''remote''', '''box''': {'''xmin''': 3_33, '''ymin''': 72, '''xmax''': 3_68, '''ymax''': 1_87}},
{'''score''': 0.99_55, '''label''': '''couch''', '''box''': {'''xmin''': 0, '''ymin''': 1, '''xmax''': 6_39, '''ymax''': 4_73}},
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
],
] , )
    @require_torch
    @slow
    def test_threshold(self):
        threshold = 0.9985
        model_id = "facebook/detr-resnet-50"

        object_detector = pipeline("object-detection", model=model_id)

        outputs = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=threshold)
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.99_88, '''label''': '''cat''', '''box''': {'''xmin''': 13, '''ymin''': 52, '''xmax''': 3_14, '''ymax''': 4_70}},
{'''score''': 0.99_87, '''label''': '''cat''', '''box''': {'''xmin''': 3_45, '''ymin''': 23, '''xmax''': 6_40, '''ymax''': 3_68}},
] , )
    @require_torch
    @require_pytesseract
    @slow
    def test_layoutlm(self):
        model_id = "Narsil/layoutlmv3-finetuned-funsd"
        threshold = 0.9993

        object_detector = pipeline("object-detection", model=model_id, threshold=threshold)
        outputs = object_detector(
            "https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png"
        )
        self.assertEqual(
            nested_simplify(outputs, decimals=4), [
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
{'''score''': 0.99_93, '''label''': '''I-ANSWER''', '''box''': {'''xmin''': 2_94, '''ymin''': 2_54, '''xmax''': 3_43, '''ymax''': 2_64}},
] , )
| 546 |
def is_power_of_two(number: int) -> bool:
    if number < 0:
        raise ValueError("number must not be negative")
    return number & (number - 1) == 0
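
# Illustrative check (not in the original module): n & (n - 1) clears the
# lowest set bit, so the expression is zero exactly for powers of two -- and,
# as a known quirk of this formulation, for zero itself:
#
#     >>> [n for n in range(1, 20) if is_power_of_two(n)]
#     [1, 2, 4, 8, 16]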
if __name__ == "__main__":
import doctest
doctest.testmod()
| 546 | 1 |
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class TestTokenizationMvp(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = MvpTokenizer
    rust_tokenizer_class = MvpTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_filter = filter_roberta_detectors
    def setUp(self):
        super().setUp()
        vocab = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'<unk>',
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write("\n".join(merges))
    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return MvpTokenizer.from_pretrained("RUCAIBox/mvp")

    @cached_property
    def default_tokenizer_fast(self):
        return MvpTokenizerFast.from_pretrained("RUCAIBox/mvp")
    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)
            # Test that special tokens are reset
    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            # check if input_ids are returned and no labels
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)
    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])
    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 1024))
    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = inputs["labels"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())
    def test_pretokenized_inputs(self):
        pass
    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 421 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class DeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distilation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DeiTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = DeiTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = DeiTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = DeiTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DeiTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            DeiTModel,
            DeiTForImageClassification,
            DeiTForImageClassificationWithTeacher,
            DeiTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DeiTModel,
            "image-classification": (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    # special case for DeiTForImageClassificationWithTeacher model
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)

        if return_labels:
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                del inputs_dict["labels"]

        return inputs_dict
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # DeiTForImageClassificationWithTeacher supports inference-only
            if (
                model_class in get_values(MODEL_MAPPING)
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue
            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            # DeiTForImageClassificationWithTeacher supports inference-only
            if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
                continue
            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_problem_types(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        problem_types = [
            {"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
            {"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
            {"title": "regression", "num_labels": 1, "dtype": torch.float},
        ]

        for model_class in self.all_model_classes:
            if (
                model_class
                not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING),
                ]
                or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
            ):
                continue

            for problem_type in problem_types:
                with self.subTest(msg=f"Testing {model_class} with {problem_type['title']}"):
                    config.problem_type = problem_type["title"]
                    config.num_labels = problem_type["num_labels"]

                    model = model_class(config)
                    model.to(torch_device)
                    model.train()

                    inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)

                    if problem_type["num_labels"] > 1:
                        inputs["labels"] = inputs["labels"].unsqueeze(1).repeat(1, problem_type["num_labels"])

                    inputs["labels"] = inputs["labels"].to(problem_type["dtype"])

                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom something is wrong for the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
                    with warnings.catch_warnings(record=True) as warning_list:
                        loss = model(**inputs).loss
                    for w in warning_list:
                        if "Using a target size that is different to the input size" in str(w.message):
                            raise ValueError(
                                f"Something is going wrong in the regression problem: intercepted {w.message}"
                            )

                    loss.backward()
    @slow
    def test_model_from_pretrained(self):
        for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )
    @slow
    def test_inference_image_classification_head(self):
        model = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.0266, 0.1912, -1.2861]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        model = DeiTModel.from_pretrained(
            "facebook/deit-base-distilled-patch16-224", torch_dtype=torch.float16, device_map="auto"
        )
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 421 | 1 |
def min_path_sum(grid: list) -> int:
    if not grid or not grid[0]:
        raise TypeError("The grid does not contain the appropriate information")

    for cell_n in range(1, len(grid[0])):
        grid[0][cell_n] += grid[0][cell_n - 1]
    row_above = grid[0]

    for row_n in range(1, len(grid)):
        current_row = grid[row_n]
        grid[row_n] = fill_row(current_row, row_above)
        row_above = grid[row_n]

    return grid[-1][-1]


def fill_row(current_row: list, row_above: list) -> list:
    current_row[0] += row_above[0]
    for cell_n in range(1, len(current_row)):
        current_row[cell_n] += min(current_row[cell_n - 1], row_above[cell_n])

    return current_row
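
# Worked example (added for illustration; not in the original module): for
# [[1, 3, 1], [1, 5, 1], [4, 2, 1]] the cheapest top-left to bottom-right path
# moving only right/down is 1 -> 3 -> 1 -> 1 -> 1, so the function returns 7:
#
#     >>> min_path_sum([[1, 3, 1], [1, 5, 1], [4, 2, 1]])
#     7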
if __name__ == "__main__":
import doctest
doctest.testmod()
| 100 |
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "PoolFormerConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "sail/poolformer_s12"
_EXPECTED_OUTPUT_SHAPE = [1, 512, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "sail/poolformer_s12"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tabby, tabby cat"

POOLFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "sail/poolformer_s12",
    # See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def drop_path(input, drop_prob: float = 0.0, training: bool = False):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""
    if drop_prob == 0.0 or not training:
        return input
    keep_prob = 1 - drop_prob
    shape = (input.shape[0],) + (1,) * (input.ndim - 1)  # work with diff dim tensors, not just 2D ConvNets
    random_tensor = keep_prob + torch.rand(shape, dtype=input.dtype, device=input.device)
    random_tensor.floor_()  # binarize
    output = input.div(keep_prob) * random_tensor
    return output
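
# Behavioural sketch (illustrative comment, not part of the original file):
# with drop_prob=0.5 and training=True, each sample's residual branch is
# independently zeroed with probability 0.5, and survivors are rescaled by
# 1 / keep_prob = 2 so the expected activation magnitude stays unchanged:
#
#     >>> x = torch.ones(4, 3)
#     >>> out = drop_path(x, drop_prob=0.5, training=True)
#     >>> # each row of `out` is either all zeros or all 2.0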
class PoolFormerDropPath(nn.Module):
    """Drop paths (Stochastic Depth) per sample (when applied in the main path of residual blocks)."""

    def __init__(self, drop_prob: Optional[float] = None) -> None:
        super().__init__()
        self.drop_prob = drop_prob

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        return drop_path(hidden_states, self.drop_prob, self.training)

    def extra_repr(self) -> str:
        return "p={}".format(self.drop_prob)
class PoolFormerEmbeddings(nn.Module):
    """
    Construct patch embeddings.
    """

    def __init__(self, hidden_size, num_channels, patch_size, stride, padding, norm_layer=None):
        super().__init__()
        patch_size = patch_size if isinstance(patch_size, collections.abc.Iterable) else (patch_size, patch_size)
        stride = stride if isinstance(stride, collections.abc.Iterable) else (stride, stride)
        padding = padding if isinstance(padding, collections.abc.Iterable) else (padding, padding)

        self.projection = nn.Conv2d(num_channels, hidden_size, kernel_size=patch_size, stride=stride, padding=padding)
        self.norm = norm_layer(hidden_size) if norm_layer else nn.Identity()

    def forward(self, pixel_values):
        embeddings = self.projection(pixel_values)
        embeddings = self.norm(embeddings)
        return embeddings
class PoolFormerGroupNorm(nn.GroupNorm):
    def __init__(self, num_channels, **kwargs):
        super().__init__(1, num_channels, **kwargs)
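
# Note (added for clarity, not part of the original file): GroupNorm with a
# single group normalizes over all channels and spatial positions of each
# sample, so this class can stand in for a LayerNorm-like operation on
# [batch, channels, height, width] tensors without any reshaping.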
class PoolFormerPooling(nn.Module):
    def __init__(self, pool_size):
        super().__init__()
        self.pool = nn.AvgPool2d(pool_size, stride=1, padding=pool_size // 2, count_include_pad=False)

    def forward(self, hidden_states):
        return self.pool(hidden_states) - hidden_states
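
# Design note (added for clarity, not part of the original file): the token
# mixer returns pool(x) - x rather than pool(x) because the surrounding
# PoolFormerLayer adds the input back through a residual connection;
# subtracting x here cancels that identity component, so the residual sum
# reduces to plain average pooling.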
class PoolFormerOutput(nn.Module):
    def __init__(self, config, dropout_prob, hidden_size, intermediate_size):
        super().__init__()
        self.conv1 = nn.Conv2d(hidden_size, intermediate_size, 1)
        self.conv2 = nn.Conv2d(intermediate_size, hidden_size, 1)
        self.drop = PoolFormerDropPath(dropout_prob)
        if isinstance(config.hidden_act, str):
            self.act_fn = ACT2FN[config.hidden_act]
        else:
            self.act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.conv1(hidden_states)
        hidden_states = self.act_fn(hidden_states)
        hidden_states = self.drop(hidden_states)
        hidden_states = self.conv2(hidden_states)
        hidden_states = self.drop(hidden_states)
        return hidden_states
class PoolFormerLayer(nn.Module):
    """This corresponds to the 'PoolFormerBlock' class in the original implementation."""

    def __init__(self, config, num_channels, pool_size, hidden_size, intermediate_size, drop_path):
        super().__init__()
        self.pooling = PoolFormerPooling(pool_size)
        self.output = PoolFormerOutput(config, drop_path, hidden_size, intermediate_size)
        self.before_norm = PoolFormerGroupNorm(num_channels)
        self.after_norm = PoolFormerGroupNorm(num_channels)

        # Useful for training neural nets
        self.drop_path = PoolFormerDropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.use_layer_scale = config.use_layer_scale
        if config.use_layer_scale:
            self.layer_scale_1 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )
            self.layer_scale_2 = nn.Parameter(
                config.layer_scale_init_value * torch.ones((num_channels)), requires_grad=True
            )

    def forward(self, hidden_states):
        if self.use_layer_scale:
            pooling_output = self.pooling(self.before_norm(hidden_states))
            scaled_op = self.layer_scale_1.unsqueeze(-1).unsqueeze(-1) * pooling_output
            # First residual connection
            hidden_states = hidden_states + self.drop_path(scaled_op)
            outputs = ()

            layer_output = self.output(self.after_norm(hidden_states))
            scaled_op = self.layer_scale_2.unsqueeze(-1).unsqueeze(-1) * layer_output
            # Second residual connection
            output = hidden_states + self.drop_path(scaled_op)

            outputs = (output,) + outputs
            return outputs

        else:
            pooling_output = self.drop_path(self.pooling(self.before_norm(hidden_states)))
            # First residual connection
            hidden_states = pooling_output + hidden_states
            outputs = ()

            # Second residual connection inside the PoolFormerOutput block
            layer_output = self.drop_path(self.output(self.after_norm(hidden_states)))
            output = hidden_states + layer_output

            outputs = (output,) + outputs
            return outputs
class PoolFormerEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        # stochastic depth decay rule
        dpr = [x.item() for x in torch.linspace(0, config.drop_path_rate, sum(config.depths))]

        # patch embeddings
        embeddings = []
        for i in range(config.num_encoder_blocks):
            embeddings.append(
                PoolFormerEmbeddings(
                    patch_size=config.patch_sizes[i],
                    stride=config.strides[i],
                    padding=config.padding[i],
                    num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1],
                    hidden_size=config.hidden_sizes[i],
                )
            )
        self.patch_embeddings = nn.ModuleList(embeddings)

        # Transformer blocks
        blocks = []
        cur = 0
        for i in range(config.num_encoder_blocks):
            # each block consists of layers
            layers = []
            if i != 0:
                cur += config.depths[i - 1]
            for j in range(config.depths[i]):
                layers.append(
                    PoolFormerLayer(
                        config,
                        num_channels=config.hidden_sizes[i],
                        pool_size=config.pool_size,
                        hidden_size=config.hidden_sizes[i],
                        intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio),
                        drop_path=dpr[cur + j],
                    )
                )
            blocks.append(nn.ModuleList(layers))
        self.block = nn.ModuleList(blocks)

    def forward(self, pixel_values, output_hidden_states=False, return_dict=True):
        all_hidden_states = () if output_hidden_states else None

        hidden_states = pixel_values
        for idx, layers in enumerate(zip(self.patch_embeddings, self.block)):
            embedding_layer, block_layer = layers
            # Get patch embeddings from hidden_states
            hidden_states = embedding_layer(hidden_states)
            # Send the embeddings through the blocks
            for _, blk in enumerate(block_layer):
                layer_outputs = blk(hidden_states)
                hidden_states = layer_outputs[0]

            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(v for v in [hidden_states, all_hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(last_hidden_state=hidden_states, hidden_states=all_hidden_states)
class PoolFormerPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = PoolFormerConfig
    base_model_prefix = "poolformer"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, (nn.Linear, nn.Conv2d)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
            if module.bias is not None:
                module.bias.data.zero_()
        elif isinstance(module, nn.LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, PoolFormerEncoder):
            module.gradient_checkpointing = value
POOLFORMER_START_DOCSTRING = r"""
    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use
    it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
    behavior.

    Parameters:
        config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.
            Initializing with a config file does not load the weights associated with the model, only the
            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""

POOLFORMER_INPUTS_DOCSTRING = r"""
    Args:
        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
            [`PoolFormerImageProcessor.__call__`] for details.
"""


@add_start_docstrings(
    "The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.",
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerModel(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config

        self.encoder = PoolFormerEncoder(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self):
        return self.embeddings.patch_embeddings

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> Union[Tuple, BaseModelOutputWithNoAttention]:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if pixel_values is None:
            raise ValueError("You have to specify pixel_values")

        encoder_outputs = self.encoder(
            pixel_values,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        sequence_output = encoder_outputs[0]

        if not return_dict:
            return (sequence_output, None) + encoder_outputs[1:]

        return BaseModelOutputWithNoAttention(
            last_hidden_state=sequence_output,
            hidden_states=encoder_outputs.hidden_states,
        )
class PoolFormerFinalPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        output = self.dense(hidden_states)
        return output
@add_start_docstrings(
    """
    PoolFormer Model transformer with an image classification head on top
    """,
    POOLFORMER_START_DOCSTRING,
)
class PoolFormerForImageClassification(PoolFormerPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.poolformer = PoolFormerModel(config)

        # Final norm
        self.norm = PoolFormerGroupNorm(config.hidden_sizes[-1])

        # Classifier head
        self.classifier = (
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(POOLFORMER_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self, pixel_values=None, labels=None, output_hidden_states=None, return_dict=None
    ) -> Union[Tuple, ImageClassifierOutputWithNoAttention]:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.poolformer(
            pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        sequence_output = outputs[0]

        logits = self.classifier(self.norm(sequence_output).mean([-2, -1]))

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
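

# Usage sketch (kept as comments so importing this module stays side-effect
# free). It assumes the public `transformers` API and the "sail/poolformer_s12"
# checkpoint; the image path is a placeholder:
#
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, PoolFormerForImageClassification
#
#     image = Image.open("cat.png").convert("RGB")
#     processor = AutoImageProcessor.from_pretrained("sail/poolformer_s12")
#     model = PoolFormerForImageClassification.from_pretrained("sail/poolformer_s12")
#
#     inputs = processor(images=image, return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])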
| 368 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a_ = logging.get_logger(__name__)
a_ = {}
class UpperCAmelCase_ ( lowerCAmelCase__ ):
UpperCamelCase ="llama"
UpperCamelCase =["past_key_values"]
def __init__( self , UpperCamelCase_=3_20_00 , UpperCamelCase_=40_96 , UpperCamelCase_=1_10_08 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_=None , UpperCamelCase_="silu" , UpperCamelCase_=20_48 , UpperCamelCase_=0.0_2 , UpperCamelCase_=1E-6 , UpperCamelCase_=True , UpperCamelCase_=0 , UpperCamelCase_=1 , UpperCamelCase_=2 , UpperCamelCase_=1 , UpperCamelCase_=False , UpperCamelCase_=None , **UpperCamelCase_ , ) -> Any:
__lowercase : Optional[Any] = vocab_size
__lowercase : Optional[Any] = max_position_embeddings
__lowercase : Union[str, Any] = hidden_size
__lowercase : Dict = intermediate_size
__lowercase : int = num_hidden_layers
__lowercase : int = num_attention_heads
# for backward compatibility
if num_key_value_heads is None:
__lowercase : Dict = num_attention_heads
__lowercase : str = num_key_value_heads
__lowercase : int = hidden_act
__lowercase : Optional[Any] = initializer_range
__lowercase : Any = rms_norm_eps
__lowercase : Union[str, Any] = pretraining_tp
__lowercase : List[Any] = use_cache
__lowercase : Optional[Any] = rope_scaling
self._rope_scaling_validation()
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , tie_word_embeddings=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
def _lowerCamelCase ( self ) -> str:
if self.rope_scaling is None:
return
if not isinstance(self.rope_scaling , _SCREAMING_SNAKE_CASE ) or len(self.rope_scaling ) != 2:
raise ValueError(
'''`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '''
F"""got {self.rope_scaling}""" )
__lowercase : List[Any] = self.rope_scaling.get('''type''' , _SCREAMING_SNAKE_CASE )
__lowercase : List[str] = self.rope_scaling.get('''factor''' , _SCREAMING_SNAKE_CASE )
if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
raise ValueError(
F"""`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}""" )
if rope_scaling_factor is None or not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) or rope_scaling_factor <= 1.0:
raise ValueError(F"""`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}""" )
| 709 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class UpperCAmelCase_ ( unittest.TestCase ):
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Union[str, Any] = 10
def _lowerCamelCase ( self ) -> str:
__lowercase : List[str] = [1, 2, 3, 4]
__lowercase : List[Any] = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
__lowercase : Any = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> int:
__lowercase : List[str] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
__lowercase : List[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(UpperCamelCase_ , self.block_size , 0 ) , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> List[Any]:
__lowercase : List[Any] = '''It was the year of Our Lord one thousand seven hundred and
seventy-five.\n\nSpiritual revelations were conceded to England at that
favoured period, as at this.'''
__lowercase ,__lowercase : Optional[Any] = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Optional[Any]:
__lowercase : Optional[int] = ''''''
__lowercase ,__lowercase : Any = process_story(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , [] )
self.assertEqual(UpperCamelCase_ , [] )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[str] = (
'''It was the year of Our Lord one thousand seven hundred and '''
'''seventy-five\n\nSpiritual revelations were conceded to England '''
'''at that favoured period, as at this.\n@highlight\n\nIt was the best of times'''
)
__lowercase ,__lowercase : int = process_story(UpperCamelCase_ )
__lowercase : Union[str, Any] = [
'''It was the year of Our Lord one thousand seven hundred and seventy-five.''',
'''Spiritual revelations were conceded to England at that favoured period, as at this.''',
]
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
__lowercase : List[str] = ['''It was the best of times.''']
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ )
def _lowerCamelCase ( self ) -> Tuple:
__lowercase : Union[str, Any] = torch.tensor([1, 2, 3, 4] )
__lowercase : Union[str, Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 0 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> List[str]:
__lowercase : Optional[Any] = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
__lowercase : Any = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 23 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Optional[int]:
__lowercase : Tuple = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
__lowercase : str = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(UpperCamelCase_ , 1 ).numpy() , expected.numpy() )
def _lowerCamelCase ( self ) -> Dict:
__lowercase : List[Any] = 1_01
__lowercase : Union[str, Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_01, 5, 6], [1, 1_01, 3, 4, 1_01, 6]] )
__lowercase : Optional[Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
__lowercase : Optional[int] = compute_token_type_ids(UpperCamelCase_ , UpperCamelCase_ )
np.testing.assert_array_equal(UpperCamelCase_ , UpperCamelCase_ )
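

# For reference, a minimal sketch of the helper under test, inferred from the
# assertions above (truncate to `block_size`, otherwise right-pad with the pad
# token); the canonical implementation lives in `utils_summarization.py`:
#
#     def truncate_or_pad(sequence, block_size, pad_token_id):
#         if len(sequence) > block_size:
#             return sequence[:block_size]
#         return sequence + [pad_token_id] * (block_size - len(sequence))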
| 523 | 0 |
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Return a list of all primes below `limit` (simple sieve of Eratosthenes)."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]

    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)

    return primes


def solution(ceiling: int = 1_000_000) -> int:
    """
    Return the prime below `ceiling` that can be written as the sum of the most
    consecutive primes (Project Euler problem 50).
    """
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0

    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break

            if sol in primes:
                length = j - i
                largest = sol

    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 604 |
import gc
import threading
import time

import psutil
import torch


class PeakCPUMemory:
    def __init__(self):
        self.process = psutil.Process()
        self.peak_monitoring = False

    def peak_monitor(self):
        self.cpu_memory_peak = -1

        while True:
            self.cpu_memory_peak = max(self.process.memory_info().rss, self.cpu_memory_peak)

            # can't sleep or will not catch the peak right (this comment is here on purpose)
            if not self.peak_monitoring:
                break

    def start(self):
        self.peak_monitoring = True
        self.thread = threading.Thread(target=self.peak_monitor)
        self.thread.daemon = True
        self.thread.start()

    def stop(self):
        self.peak_monitoring = False
        self.thread.join()
        return self.cpu_memory_peak


cpu_peak_tracker = PeakCPUMemory()


def start_measure():
    # Time
    measures = {"time": time.time()}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = psutil.Process().memory_info().rss
    cpu_peak_tracker.start()

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = torch.cuda.memory_allocated(i)
    torch.cuda.reset_peak_memory_stats()

    return measures


def end_measure(start_measures):
    # Time
    measures = {"time": time.time() - start_measures["time"]}

    gc.collect()
    torch.cuda.empty_cache()

    # CPU mem
    measures["cpu"] = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
    measures["cpu-peak"] = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20

    # GPU mem
    for i in range(torch.cuda.device_count()):
        measures[str(i)] = (torch.cuda.memory_allocated(i) - start_measures[str(i)]) / 2**20
        measures[f"{i}-peak"] = (torch.cuda.max_memory_allocated(i) - start_measures[str(i)]) / 2**20

    return measures


def log_measures(measures, description):
    print(f"{description}:")
    print(f"- Time: {measures['time']:.2f}s")
    for i in range(torch.cuda.device_count()):
        print(f"- GPU {i} allocated: {measures[str(i)]:.2f}MiB")
        peak = measures[f"{i}-peak"]
        print(f"- GPU {i} peak: {peak:.2f}MiB")
    print(f"- CPU RAM allocated: {measures['cpu']:.2f}MiB")
    print(f"- CPU RAM peak: {measures['cpu-peak']:.2f}MiB")
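

# Usage sketch (kept as comments): wrap any workload with the helpers above to
# report wall time plus CPU/GPU memory deltas and peaks in MiB:
#
#     start = start_measure()
#     layer = torch.nn.Linear(1024, 1024)  # stand-in for a real workload
#     measures = end_measure(start)
#     log_measures(measures, "Linear layer instantiation")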
| 604 | 1 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset(dataset, expected_features):
    assert isinstance(dataset, Dataset)
    assert dataset.num_rows == 4
    assert dataset.num_columns == 3
    assert dataset.column_names == ["col_1", "col_2", "col_3"]
    for feature, expected_dtype in expected_features.items():
        assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : int = tmp_path / 'cache'
A_ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : List[str] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[str] = tmp_path / 'cache'
A_ : Any = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Any = features.copy() if features else default_expected_features
A_ : List[Any] = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Dict = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : List[Any] = tmp_path / 'cache'
A_ : Optional[int] = {'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'}
A_ : List[Any] = features.copy() if features else default_expected_features
A_ : Dict = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Any = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Union[str, Any] = {'col_2': 'int64', 'col_3': 'float64', 'col_1': 'string'}
A_ : List[str] = features.copy()
A_ : Dict = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : int = tmp_path / 'cache'
A_ : Any = JsonDatasetReader(_SCREAMING_SNAKE_CASE , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
assert isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Dict = tmp_path / 'cache'
A_ : List[Any] = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Union[str, Any] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE , split=_SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = jsonl_path
elif issubclass(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
A_ : Optional[int] = [jsonl_path]
A_ : str = tmp_path / 'cache'
A_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : List[str] = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_dataset(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def _check_json_datasetdict(dataset_dict, expected_features, splits=("train",)):
    assert isinstance(dataset_dict, DatasetDict)
    for split in splits:
        dataset = dataset_dict[split]
        assert dataset.num_rows == 4
        assert dataset.num_columns == 3
        assert dataset.column_names == ["col_1", "col_2", "col_3"]
        for feature, expected_dtype in expected_features.items():
            assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : int = tmp_path / 'cache'
A_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A_ : str = JsonDatasetReader({'train': jsonl_path} , cache_dir=_SCREAMING_SNAKE_CASE , keep_in_memory=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
A_ : Optional[Any] = tmp_path / 'cache'
A_ : Tuple = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Union[str, Any] = features.copy() if features else default_expected_features
A_ : Tuple = (
Features({feature: Value(_SCREAMING_SNAKE_CASE ) for feature, dtype in features.items()} ) if features is not None else None
)
A_ : Union[str, Any] = JsonDatasetReader({'train': jsonl_path} , features=_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def UpperCAmelCase__ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ):
"""simple docstring"""
if split:
A_ : List[Any] = {split: jsonl_path}
else:
A_ : Tuple = 'train'
A_ : str = {'train': jsonl_path, 'test': jsonl_path}
A_ : int = tmp_path / 'cache'
A_ : int = {'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'}
A_ : Any = JsonDatasetReader(_SCREAMING_SNAKE_CASE , cache_dir=_SCREAMING_SNAKE_CASE ).read()
_check_json_datasetdict(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json(buffer):
    return json.load(buffer)


def load_json_lines(buffer):
    return [json.loads(line) for line in buffer]
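

# Round-trip sketch (kept as comments; "data.jsonl" and "cache" are
# placeholders for a real JSON Lines file and a cache directory):
#
#     ds = JsonDatasetReader("data.jsonl", cache_dir="cache").read()
#     with io.BytesIO() as buffer:
#         JsonDatasetWriter(ds, buffer, lines=True).write()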
class TestJsonDatasetWriter:
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ ).write()
buffer.seek(0 )
A_ : Union[str, Any] = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ ).write()
buffer.seek(0 )
A_ : Dict = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize('lines, load_json_function' , [(True, load_json_lines), (False, load_json)] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
A_ : int = load_json_function(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
assert isinstance(exported_content[0] , snake_case_ )
assert len(snake_case_ ) == 1_0
@pytest.mark.parametrize(
'orient, container, keys, len_at' , [
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] , )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , lines=snake_case_ , orient=snake_case_ , num_proc=2 ).write()
buffer.seek(0 )
A_ : str = load_json(snake_case_ )
assert isinstance(snake_case_ , snake_case_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(snake_case_ , 'keys' ) and not hasattr(exported_content[0] , 'keys' )
if len_at:
assert len(exported_content[len_at] ) == 1_0
else:
assert len(snake_case_ ) == 1_0
def lowerCamelCase_ ( self , snake_case_ ):
"""simple docstring"""
with pytest.raises(snake_case_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(snake_case_ , snake_case_ , num_proc=0 )
@pytest.mark.parametrize('compression, extension' , [('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
A_ : str = tmp_path_factory.mktemp('data' ) / F"""test.json.{extension}"""
A_ : Optional[Any] = str(shared_datadir / F"""test_file.json.{extension}""" )
JsonDatasetWriter(snake_case_ , snake_case_ , compression=snake_case_ ).write()
with fsspec.open(snake_case_ , 'rb' , compression='infer' ) as f:
A_ : List[str] = f.read()
with fsspec.open(snake_case_ , 'rb' , compression='infer' ) as f:
A_ : int = f.read()
        assert exported_content == original_content
| 701 |
"""simple docstring"""
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class IFPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFPipeline
    params = TEXT_TO_IMAGE_PARAMS - {"width", "height", "latents"}
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
def lowerCamelCase_ ( self ):
"""simple docstring"""
return self._get_dummy_components()
def lowerCamelCase_ ( self , snake_case_ , snake_case_=0 ):
"""simple docstring"""
if str(snake_case_ ).startswith('mps' ):
A_ : Optional[int] = torch.manual_seed(snake_case_ )
else:
A_ : str = torch.Generator(device=snake_case_ ).manual_seed(snake_case_ )
A_ : Any = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().test_save_load_floataa(expected_max_diff=1E-1 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_save_load_local()
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ):
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@slow
@require_torch_gpu
class IFPipelineSlowTests(unittest.TestCase):
def lowerCamelCase_ ( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ):
"""simple docstring"""
A_ : Union[str, Any] = IFPipeline.from_pretrained('DeepFloyd/IF-I-XL-v1.0' , variant='fp16' , torch_dtype=torch.floataa )
A_ : str = IFSuperResolutionPipeline.from_pretrained(
'DeepFloyd/IF-II-L-v1.0' , variant='fp16' , torch_dtype=torch.floataa , text_encoder=snake_case_ , tokenizer=snake_case_ )
# pre compute text embeddings and remove T5 to save memory
pipe_a.text_encoder.to('cuda' )
A_ , A_ : Tuple = pipe_a.encode_prompt('anime turtle' , device='cuda' )
del pipe_a.tokenizer
del pipe_a.text_encoder
gc.collect()
A_ : List[Any] = None
A_ : List[str] = None
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# img2img
A_ : Union[str, Any] = IFImgaImgPipeline(**pipe_a.components )
A_ : Optional[int] = IFImgaImgSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_imgaimg(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
pipe_a.remove_all_hooks()
pipe_a.remove_all_hooks()
# inpainting
A_ : Optional[Any] = IFInpaintingPipeline(**pipe_a.components )
A_ : Union[str, Any] = IFInpaintingSuperResolutionPipeline(**pipe_a.components )
pipe_a.enable_model_cpu_offload()
pipe_a.enable_model_cpu_offload()
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
pipe_a.unet.set_attn_processor(AttnAddedKVProcessor() )
self._test_if_inpainting(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
_start_torch_memory_measurement()
A_ : str = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type='np' , )
A_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A_ : List[Any] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_3 * 1_0**9
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A_ : List[Any] = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : str = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type='np' , )
A_ : str = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A_ : str = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A_ : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
_start_torch_memory_measurement()
A_ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type='np' , )
A_ : List[str] = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
A_ : int = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A_ : str = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : str = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : str = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : Optional[Any] = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type='np' , )
A_ : Tuple = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A_ : List[str] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def lowerCamelCase_ ( self , snake_case_ , snake_case_ , snake_case_ , snake_case_ ):
"""simple docstring"""
_start_torch_memory_measurement()
A_ : List[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : Dict = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(1 ) ).to(snake_case_ )
A_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Union[str, Any] = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , num_inference_steps=2 , generator=snake_case_ , output_type='np' , )
A_ : Dict = output.images[0]
assert image.shape == (6_4, 6_4, 3)
A_ : List[str] = torch.cuda.max_memory_allocated()
assert mem_bytes < 1_0 * 1_0**9
A_ : str = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
# pipeline 2
_start_torch_memory_measurement()
A_ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
A_ : Optional[Any] = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : Dict = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(0 ) ).to(snake_case_ )
A_ : Union[str, Any] = floats_tensor((1, 3, 2_5_6, 2_5_6) , rng=random.Random(1 ) ).to(snake_case_ )
A_ : str = pipe_a(
prompt_embeds=snake_case_ , negative_prompt_embeds=snake_case_ , image=snake_case_ , mask_image=snake_case_ , original_image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type='np' , )
A_ : Any = output.images[0]
assert image.shape == (2_5_6, 2_5_6, 3)
A_ : int = torch.cuda.max_memory_allocated()
assert mem_bytes < 4 * 1_0**9
A_ : Optional[int] = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy' )
assert_mean_pixel_difference(snake_case_ , snake_case_ )
def _start_torch_memory_measurement():
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 302 | 0 |
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric

from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """Compute WER/CER metrics and write them (and optionally all outputs) to disk."""

    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    """Normalize the target text; adapt this to the preprocessing used during training."""

    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training

    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
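
# Example invocation (shell command shown as a comment; the model and dataset
# identifiers are placeholders, substitute your own, and the script is assumed
# to be saved as `eval.py`):
#
#     python eval.py \
#         --model_id facebook/wav2vec2-base-960h \
#         --dataset mozilla-foundation/common_voice_7_0 \
#         --config en \
#         --split test \
#         --log_outputs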
| 43 |
import torch
from diffusers import DDIMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDIMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDIMParallelScheduler,)
    forward_default_kwargs = (("eta", 0.0), ("num_inference_steps", 50))

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def full_loop(self, **config):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(**config)
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps, eta = 10, 0.0

        model = self.dummy_model()
        sample = self.dummy_sample_deter

        scheduler.set_timesteps(num_inference_steps)

        for t in scheduler.timesteps:
            residual = model(sample, t)
            sample = scheduler.step(residual, t, sample, eta).prev_sample

        return sample
def snake_case__ ( self ):
'''simple docstring'''
for timesteps in [100, 500, 1000]:
self.check_over_configs(num_train_timesteps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=snake_case )
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config(steps_offset=1 )
UpperCamelCase__ = scheduler_class(**snake_case )
scheduler.set_timesteps(5 )
assert torch.equal(scheduler.timesteps , torch.LongTensor([801, 601, 401, 201, 1] ) )
def snake_case__ ( self ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case , beta_end=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for timestep_spacing in ["trailing", "leading"]:
self.check_over_configs(timestep_spacing=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for rescale_betas_zero_snr in [True, False]:
self.check_over_configs(rescale_betas_zero_snr=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case , prediction_type=snake_case , sample_max_value=snake_case , )
def snake_case__ ( self ):
'''simple docstring'''
for t in [1, 10, 49]:
self.check_over_forward(time_step=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, num_inference_steps in zip([1, 10, 50] , [10, 50, 500] ):
self.check_over_forward(time_step=snake_case , num_inference_steps=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
for t, eta in zip([1, 10, 49] , [0.0, 0.5, 1.0] ):
self.check_over_forward(time_step=snake_case , eta=snake_case )
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(420 , 400 ) - 0.14771 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(980 , 960 ) - 0.32460 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(0 , 0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 , 486 ) - 0.00979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 , 998 ) - 0.02 ) ) < 1E-5
def snake_case__ ( self ):
'''simple docstring'''
UpperCamelCase__ = self.scheduler_classes[0]
UpperCamelCase__ = self.get_scheduler_config()
UpperCamelCase__ = scheduler_class(**snake_case )
UpperCamelCase__, UpperCamelCase__ = 10, 0.0
scheduler.set_timesteps(snake_case )
UpperCamelCase__ = self.dummy_model()
UpperCamelCase__ = self.dummy_sample_deter
UpperCamelCase__ = self.dummy_sample_deter + 0.1
UpperCamelCase__ = self.dummy_sample_deter - 0.1
UpperCamelCase__ = samplea.shape[0]
UpperCamelCase__ = torch.stack([samplea, samplea, samplea] , dim=0 )
UpperCamelCase__ = torch.arange(snake_case )[0:3, None].repeat(1 , snake_case )
UpperCamelCase__ = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
UpperCamelCase__ = scheduler.batch_step_no_noise(snake_case , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) , snake_case )
UpperCamelCase__ = torch.sum(torch.abs(snake_case ) )
UpperCamelCase__ = torch.mean(torch.abs(snake_case ) )
assert abs(result_sum.item() - 1147.7904 ) < 1E-2
assert abs(result_mean.item() - 0.4982 ) < 1E-3
    def test_full_loop_no_noise(self):
        sample = self.full_loop()

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 172.0067) < 1e-2
        assert abs(result_mean.item() - 0.223967) < 1e-3

    def test_full_loop_with_v_prediction(self):
        sample = self.full_loop(prediction_type="v_prediction")

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 52.5302) < 1e-2
        assert abs(result_mean.item() - 0.0684) < 1e-3

    def test_full_loop_with_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=True, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.8295) < 1e-2
        assert abs(result_mean.item() - 0.1951) < 1e-3

    def test_full_loop_with_no_set_alpha_to_one(self):
        # We specify different beta, so that the first alpha is 0.99
        sample = self.full_loop(set_alpha_to_one=False, beta_start=0.01)

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 149.0784) < 1e-2
        assert abs(result_mean.item() - 0.1941) < 1e-3
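

# Usage sketch (kept as comments): the denoising loop that `full_loop`
# exercises, written against the public scheduler API; `model` and `sample`
# stand in for any noise-prediction module and any noisy tensor:
#
#     scheduler = DDIMParallelScheduler(num_train_timesteps=1000)
#     scheduler.set_timesteps(10)
#     for t in scheduler.timesteps:
#         residual = model(sample, t)
#         sample = scheduler.step(residual, t, sample, 0.0).prev_sample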
| 551 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
logger = logging.getLogger(__name__)
@dataclass(frozen=True)
class InputExample:
    """A single training/test example for the HANS dataset."""

    guid: str
    text_a: str
    text_b: Optional[str] = None
    label: Optional[str] = None
    pairID: Optional[str] = None


@dataclass(frozen=True)
class InputFeatures:
    """A single set of features of data; property names match model input names."""

    input_ids: List[int]
    attention_mask: Optional[List[int]] = None
    token_type_ids: Optional[List[int]] = None
    label: Optional[Union[int, float]] = None
    pairID: Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class _lowercase ( _A ):
_a : List[Any] = 42
def __init__( self , a , a , a , a = None , a=False , a = False , ):
snake_case__ : Optional[Any] =hans_processors[task]()
snake_case__ : Optional[Any] =os.path.join(
_lowerCAmelCase , """cached_{}_{}_{}_{}""".format(
"""dev""" if evaluate else """train""" , tokenizer.__class__.__name__ , str(_lowerCAmelCase ) , _lowerCAmelCase , ) , )
snake_case__ : Optional[int] =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case__ , snake_case__ : List[str] =label_list[2], label_list[1]
snake_case__ : Optional[Any] =label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case__ : Dict =cached_features_file + """.lock"""
with FileLock(_lowerCAmelCase ):
if os.path.exists(_lowerCAmelCase ) and not overwrite_cache:
logger.info(F"Loading features from cached file {cached_features_file}" )
snake_case__ : Tuple =torch.load(_lowerCAmelCase )
else:
logger.info(F"Creating features from dataset file at {data_dir}" )
snake_case__ : Optional[int] =(
processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
)
logger.info("""Training examples: %s""" , len(_lowerCAmelCase ) )
snake_case__ : Dict =hans_convert_examples_to_features(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
logger.info("""Saving features into cached file %s""" , _lowerCAmelCase )
torch.save(self.features , _lowerCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self , a ):
return self.features[i]
def lowercase__ ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class _lowercase :
_a : List[str] = 42
def __init__( self , a , a , a , a = 1_2_8 , a=False , a = False , ):
snake_case__ : Dict =hans_processors[task]()
snake_case__ : Tuple =processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case__ , snake_case__ : Tuple =label_list[2], label_list[1]
snake_case__ : Any =label_list
snake_case__ : str =processor.get_dev_examples(_lowerCAmelCase ) if evaluate else processor.get_train_examples(_lowerCAmelCase )
snake_case__ : List[str] =hans_convert_examples_to_features(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="""convert examples to features""" ):
if ex_index % 1_0_0_0_0 == 0:
logger.info("""Writing example %d of %d""" % (ex_index, len(_lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
snake_case__ : Optional[Any] =tf.data.Dataset.from_generator(
_lowerCAmelCase , (
{
"""example_id""": tf.intaa,
"""input_ids""": tf.intaa,
"""attention_mask""": tf.intaa,
"""token_type_ids""": tf.intaa,
},
tf.intaa,
) , (
{
"""example_id""": tf.TensorShape([] ),
"""input_ids""": tf.TensorShape([None, None] ),
"""attention_mask""": tf.TensorShape([None, None] ),
"""token_type_ids""": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def lowercase__ ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , a ):
return self.features[i]
def lowercase__ ( self ):
return self.label_list
class HansProcessor(DataProcessor):
    """Processor for the HANS data set."""

    def get_train_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_train_set.txt")), "train")

    def get_dev_examples(self, data_dir):
        """See base class."""
        return self._create_examples(self._read_tsv(os.path.join(data_dir, "heuristics_evaluation_set.txt")), "dev")

    def get_labels(self):
        """See base class."""
        return ["contradiction", "entailment", "neutral"]

    def _create_examples(self, lines, set_type):
        """Creates examples for the training and dev sets."""
        examples = []
        for i, line in enumerate(lines):
            if i == 0:
                continue
            guid = "%s-%s" % (set_type, line[0])
            text_a = line[5]
            text_b = line[6]
            pairID = line[7][2:] if line[7].startswith("ex") else line[7]
            label = line[0]
            examples.append(InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label, pairID=pairID))
        return examples
def hans_convert_examples_to_features(
    examples: List[InputExample], label_list: List[str], max_length: int, tokenizer: PreTrainedTokenizer
):
    """Convert `InputExample`s to `InputFeatures` using the given tokenizer and label list."""
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for ex_index, example in tqdm.tqdm(enumerate(examples), desc="convert examples to features"):
        if ex_index % 10000 == 0:
            logger.info("Writing example %d" % (ex_index))

        inputs = tokenizer(
            example.text_a,
            example.text_b,
            add_special_tokens=True,
            max_length=max_length,
            padding="max_length",
            truncation=True,
            return_overflowing_tokens=True,
        )

        label = label_map[example.label] if example.label in label_map else 0
        pairID = int(example.pairID)

        features.append(InputFeatures(**inputs, label=label, pairID=pairID))

    for i, example in enumerate(examples[:5]):
        logger.info("*** Example ***")
        logger.info(f"guid: {example}")
        logger.info(f"features: {features[i]}")

    return features
hans_tasks_num_labels = {
    "hans": 3,
}

hans_processors = {
    "hans": HansProcessor,
}
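

# Usage sketch (kept as comments; the dataset classes above correspond to
# `HansDataset`/`TFHansDataset` in upstream `transformers`, and "data/hans" is
# a placeholder directory containing the HANS `heuristics_*_set.txt` files):
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
#     eval_dataset = HansDataset("data/hans", tokenizer, "hans", 128, evaluate=True)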
| 709 |
import math


def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    end = end or len(array)
    for i in range(start, end):
        temp_index = i
        temp_index_value = array[i]
        while temp_index != start and temp_index_value < array[temp_index - 1]:
            array[temp_index] = array[temp_index - 1]
            temp_index -= 1
        array[temp_index] = temp_index_value
    return array


def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node

    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index

    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index

    if largest != index:
        array[index], array[largest] = array[largest], array[index]
        heapify(array, largest, heap_size)


def heap_sort(array: list) -> list:
    n = len(array)

    for i in range(n // 2, -1, -1):
        heapify(array, i, n)

    for i in range(n - 1, 0, -1):
        array[i], array[0] = array[0], array[i]
        heapify(array, 0, i)

    return array


def median_of_3(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]


def partition(array: list, low: int, high: int, pivot: int) -> int:
    i = low
    j = high
    while True:
        while array[i] < pivot:
            i += 1
        j -= 1
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1


def sort(array: list) -> list:
    if len(array) == 0:
        return array
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)


def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_3(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by a comma : ").strip()
    unsorted = [float(item) for item in user_input.split(",")]
    print(sort(unsorted))
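
# Example (kept as a comment; note that `sort` sorts in place and returns the
# same list object):
#
#     sort([4, 2, 6, 8, 1, 7, 8, 22, 14, 56, 27, 79, 23, 45, 14, 12])
#     # -> [1, 2, 4, 6, 7, 8, 8, 12, 14, 14, 22, 23, 27, 45, 56, 79]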
| 448 | 0 |
from typing import List, Optional, Tuple, Union

import torch

from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


class DanceDiffusionPipeline(DiffusionPipeline):
    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 100,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        audio_length_in_s: Optional[float] = None,
        return_dict: bool = True,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if audio_length_in_s is None:
            audio_length_in_s = self.unet.config.sample_size / self.unet.config.sample_rate

        sample_size = audio_length_in_s * self.unet.config.sample_rate

        down_scale_factor = 2 ** len(self.unet.up_blocks)
        if sample_size < 3 * down_scale_factor:
            raise ValueError(
                f"{audio_length_in_s} is too small. Make sure it's bigger or equal to"
                f" {3 * down_scale_factor / self.unet.config.sample_rate}."
            )

        original_sample_size = int(sample_size)
        if sample_size % down_scale_factor != 0:
            sample_size = (
                (audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
            ) * down_scale_factor
            logger.info(
                f"{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"
                f" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"
                " process."
            )
        sample_size = int(sample_size)

        dtype = next(iter(self.unet.parameters())).dtype
        shape = (batch_size, self.unet.config.in_channels, sample_size)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        audio = randn_tensor(shape, generator=generator, device=self.device, dtype=dtype)

        # set step values
        self.scheduler.set_timesteps(num_inference_steps, device=audio.device)
        self.scheduler.timesteps = self.scheduler.timesteps.to(dtype)

        for t in self.progress_bar(self.scheduler.timesteps):
            # 1. predict noise model_output
            model_output = self.unet(audio, t).sample

            # 2. compute previous audio sample: x_t -> x_t-1
            audio = self.scheduler.step(model_output, t, audio).prev_sample

        audio = audio.clamp(-1, 1).float().cpu().numpy()

        audio = audio[:, :, :original_sample_size]

        if not return_dict:
            return (audio,)

        return AudioPipelineOutput(audios=audio)
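

# Usage sketch (kept as comments; assumes the public "harmonai/maestro-150k"
# checkpoint from the diffusers hub and the `scipy` package for writing WAVs):
#
#     from scipy.io.wavfile import write
#     from diffusers import DiffusionPipeline
#
#     pipe = DiffusionPipeline.from_pretrained("harmonai/maestro-150k")
#     audio = pipe(audio_length_in_s=4.5).audios[0]
#     write("out.wav", pipe.unet.config.sample_rate, audio.T)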
| 18 |
'''simple docstring'''
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}

    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})

        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
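

# e.g. (kept as a comment; requires network access and, for private resources,
# a GitHub token; both values below are placeholders):
#
#     links = get_job_links("1234567890", token=None)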
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}

    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}

    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)

        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})

        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")

    return {}
def __a(SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Any , SCREAMING_SNAKE_CASE_ : Dict ):
'''simple docstring'''
_lowerCAmelCase = None
if token is not None:
_lowerCAmelCase = {"Accept": "application/vnd.github+json", "Authorization": F'''Bearer {token}'''}
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , headers=SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = result.headers["Location"]
_lowerCAmelCase = requests.get(SCREAMING_SNAKE_CASE_ , allow_redirects=SCREAMING_SNAKE_CASE_ )
_lowerCAmelCase = os.path.join(SCREAMING_SNAKE_CASE_ , F'''{artifact_name}.zip''' )
with open(SCREAMING_SNAKE_CASE_ , "wb" ) as fp:
fp.write(response.content )
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Optional[Any]=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = []
_lowerCAmelCase = None
with zipfile.ZipFile(SCREAMING_SNAKE_CASE_ ) as z:
for filename in z.namelist():
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(SCREAMING_SNAKE_CASE_ ) as f:
for line in f:
_lowerCAmelCase = line.decode("UTF-8" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_lowerCAmelCase = line[: line.index(": " )]
_lowerCAmelCase = line[line.index(": " ) + len(": " ) :]
errors.append([error_line, error] )
except Exception:
                                    # skip unrelated lines
pass
elif filename == "summary_short.txt" and line.startswith("FAILED " ):
# `test` is the test method that failed
_lowerCAmelCase = line[len("FAILED " ) :]
failed_tests.append(SCREAMING_SNAKE_CASE_ )
elif filename == "job_name.txt":
_lowerCAmelCase = line
if len(SCREAMING_SNAKE_CASE_ ) != len(SCREAMING_SNAKE_CASE_ ):
raise ValueError(
F'''`errors` and `failed_tests` should have the same number of elements. Got {len(SCREAMING_SNAKE_CASE_ )} for `errors` '''
F'''and {len(SCREAMING_SNAKE_CASE_ )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'''
" problem." )
_lowerCAmelCase = None
if job_name and job_links:
_lowerCAmelCase = job_links.get(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# A list with elements of the form (line of error, error, failed test)
_lowerCAmelCase = [x + [y] + [job_link] for x, y in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )]
return result
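# Illustrative example (invented line, not from a real report) of how a `failures_line.txt`
# entry is split into `(error_line, error)` above: everything before the first ": " is the
# location, and everything after it is the error message.
_line = "tests/models/bert/test_modeling_bert.py:123: AssertionError: tensors are not close"
_error_line = _line[: _line.index(": ")]
_error = _line[_line.index(": ") + len(": ") :]
assert _error_line == "tests/models/bert/test_modeling_bert.py:123"
assert _error == "AssertionError: tensors are not close"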
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = []
_lowerCAmelCase = [os.path.join(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) for p in os.listdir(SCREAMING_SNAKE_CASE_ ) if p.endswith(".zip" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(SCREAMING_SNAKE_CASE_ , job_links=SCREAMING_SNAKE_CASE_ ) )
return errors
def __a(SCREAMING_SNAKE_CASE_ : List[str] , SCREAMING_SNAKE_CASE_ : str=None ):
'''simple docstring'''
_lowerCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_lowerCAmelCase = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    _lowerCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : List[str] ):
'''simple docstring'''
_lowerCAmelCase = test.split("::" )[0]
if test.startswith("tests/models/" ):
_lowerCAmelCase = test.split("/" )[2]
else:
_lowerCAmelCase = None
    return model
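# Hedged sketch of the intended behaviour of the function above: extract the model folder
# from a pytest node id, or None for tests outside `tests/models/`. The helper name and
# node ids below are invented examples.
def _get_model_example(test: str):
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        return test.split("/")[2]
    return None


assert _get_model_example("tests/models/albert/test_modeling_albert.py::AlbertModelTest::test_forward") == "albert"
assert _get_model_example("tests/test_configuration_common.py::ConfigTester::test_common") is None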
def __a(SCREAMING_SNAKE_CASE_ : Optional[Any] , SCREAMING_SNAKE_CASE_ : Tuple=None ):
'''simple docstring'''
_lowerCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_lowerCAmelCase = [x for x in logs if x[2] is not None]
_lowerCAmelCase = {x[2] for x in logs}
_lowerCAmelCase = {}
for test in tests:
_lowerCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_lowerCAmelCase = counter.most_common()
_lowerCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_lowerCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_lowerCAmelCase = {"count": n_errors, "errors": error_counts}
    _lowerCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=SCREAMING_SNAKE_CASE_ ) )
return r
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| no. | error | status |"
_lowerCAmelCase = "|-:|:-|:-|"
_lowerCAmelCase = [header, sep]
for error in reduced_by_error:
_lowerCAmelCase = reduced_by_error[error]["count"]
_lowerCAmelCase = F'''| {count} | {error[:100]} | |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
def __a(SCREAMING_SNAKE_CASE_ : Optional[int] ):
'''simple docstring'''
_lowerCAmelCase = "| model | no. of errors | major error | count |"
_lowerCAmelCase = "|-:|-:|-:|-:|"
_lowerCAmelCase = [header, sep]
for model in reduced_by_model:
_lowerCAmelCase = reduced_by_model[model]["count"]
_lowerCAmelCase , _lowerCAmelCase = list(reduced_by_model[model]["errors"].items() )[0]
_lowerCAmelCase = F'''| {model} | {count} | {error[:60]} | {_count} |'''
lines.append(SCREAMING_SNAKE_CASE_ )
return "\n".join(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
_SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
parser.add_argument(
"--output_dir",
type=str,
required=True,
help="Where to store the downloaded artifacts and other result files.",
)
parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
_SCREAMING_SNAKE_CASE = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
_SCREAMING_SNAKE_CASE = get_job_links(args.workflow_run_id, token=args.token)
_SCREAMING_SNAKE_CASE = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
_SCREAMING_SNAKE_CASE = k.find(" / ")
_SCREAMING_SNAKE_CASE = k[index + len(" / ") :]
_SCREAMING_SNAKE_CASE = v
with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
_SCREAMING_SNAKE_CASE = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
_SCREAMING_SNAKE_CASE = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
_SCREAMING_SNAKE_CASE = counter.most_common(30)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
_SCREAMING_SNAKE_CASE = reduce_by_error(errors)
_SCREAMING_SNAKE_CASE = reduce_by_model(errors)
_SCREAMING_SNAKE_CASE = make_github_table(reduced_by_error)
_SCREAMING_SNAKE_CASE = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
fp.write(sa)
| 18 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class _UpperCAmelCase ( unittest.TestCase ):
def a_ ( self ) -> int:
UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 1_2_8, 'min_length': 1_2, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 1_4_2, 'min_length': 5_6, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 6_2, 'min_length': 1_1, 'num_beams': 6},
}
}
UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 1_2_8,
'task_specific_params.summarization.min_length': 1_2,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 1_4_2,
'task_specific_params.summarization_cnn.min_length': 5_6,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 6_2,
'task_specific_params.summarization_xsum.min_length': 1_1,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(lowercase_ ) , lowercase_ )
def a_ ( self ) -> Tuple:
UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(lowercase_ ) , x.transpose() ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def a_ ( self ) -> str:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , transpose(lowercase_ ).numpy() ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , transpose(lowercase_ , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def a_ ( self ) -> Any:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ ) , np.asarray(transpose(lowercase_ ) ) ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(transpose(lowercase_ , axes=(1, 2, 0) ) , np.asarray(transpose(lowercase_ , axes=(1, 2, 0) ) ) ) )
def a_ ( self ) -> Optional[int]:
UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.reshape(lowercase_ , (4, 3) ) ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(lowercase_ , (1_2, 5) ) , np.reshape(lowercase_ , (1_2, 5) ) ) )
@require_torch
def a_ ( self ) -> int:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (1_2, 5) ) , reshape(lowercase_ , (1_2, 5) ).numpy() ) )
@require_tf
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , reshape(lowercase_ , (4, 3) ).numpy() ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (1_2, 5) ) , reshape(lowercase_ , (1_2, 5) ).numpy() ) )
@require_flax
def a_ ( self ) -> int:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (4, 3) ) , np.asarray(reshape(lowercase_ , (4, 3) ) ) ) )
UpperCAmelCase = np.random.randn(3 , 4 , 5 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(reshape(lowercase_ , (1_2, 5) ) , np.asarray(reshape(lowercase_ , (1_2, 5) ) ) ) )
def a_ ( self ) -> Any:
UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.squeeze(lowercase_ ) ) )
UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.squeeze(lowercase_ , axis=2 ) ) )
@require_torch
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = np.random.randn(1 , 3 , 4 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_tf
def a_ ( self ) -> str:
UpperCAmelCase = np.random.randn(1 , 3 , 4 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , squeeze(lowercase_ ).numpy() ) )
UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , squeeze(lowercase_ , axis=2 ).numpy() ) )
@require_flax
def a_ ( self ) -> List[str]:
UpperCAmelCase = np.random.randn(1 , 3 , 4 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ ) , np.asarray(squeeze(lowercase_ ) ) ) )
UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(squeeze(lowercase_ , axis=2 ) , np.asarray(squeeze(lowercase_ , axis=2 ) ) ) )
def a_ ( self ) -> List[Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.expand_dims(lowercase_ , axis=1 ) ) )
@require_torch
def a_ ( self ) -> Union[str, Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = torch.tensor(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_tf
def a_ ( self ) -> Optional[Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = tf.constant(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , expand_dims(lowercase_ , axis=1 ).numpy() ) )
@require_flax
def a_ ( self ) -> List[Any]:
UpperCAmelCase = np.random.randn(3 , 4 )
UpperCAmelCase = jnp.array(lowercase_ )
self.assertTrue(np.allclose(expand_dims(lowercase_ , axis=1 ) , np.asarray(expand_dims(lowercase_ , axis=1 ) ) ) )
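# A hedged, minimal re-implementation of the flattening semantics exercised by the first
# test above (nested keys joined with "."); an assumed sketch, not the actual
# `transformers.utils.flatten_dict`.
def _flatten_dict_sketch(d, parent_key=""):
    items = {}
    for k, v in d.items():
        new_key = f"{parent_key}.{k}" if parent_key else k
        if isinstance(v, dict):
            # recurse into nested dicts, carrying the joined key prefix
            items.update(_flatten_dict_sketch(v, new_key))
        else:
            items[new_key] = v
    return items


assert _flatten_dict_sketch({"a": {"b": 1, "c": {"d": 2}}}) == {"a.b": 1, "a.c.d": 2}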
| 183 |
"""simple docstring"""
import sys
import turtle
def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] ) -> tuple[float, float]:
"""simple docstring"""
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2
def lowercase__ ( lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : tuple[float, float] , lowerCAmelCase : int , ) -> None:
"""simple docstring"""
    my_pen.up()
    my_pen.goto(vertexa[0] , vertexa[1] )
    my_pen.down()
    my_pen.goto(vertexb[0] , vertexb[1] )
    my_pen.goto(vertexc[0] , vertexc[1] )
    my_pen.goto(vertexa[0] , vertexa[1] )
    if depth == 0:
        return
    triangle(vertexa , get_mid(vertexa , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
    triangle(vertexb , get_mid(vertexa , vertexb ) , get_mid(vertexb , vertexc ) , depth - 1 )
    triangle(vertexc , get_mid(vertexc , vertexb ) , get_mid(vertexa , vertexc ) , depth - 1 )
if __name__ == "__main__":
if len(sys.argv) != 2:
raise ValueError(
'''Correct format for using this script: '''
'''python fractals.py <int:depth_for_fractal>'''
)
SCREAMING_SNAKE_CASE_ = turtle.Turtle()
my_pen.ht()
my_pen.speed(5)
my_pen.pencolor('''red''')
SCREAMING_SNAKE_CASE_ = [(-175, -125), (0, 175), (175, -125)] # vertices of triangle
triangle(vertices[0], vertices[1], vertices[2], int(sys.argv[1]))
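# Quick, turtle-free sanity check of the midpoint rule used by the recursion above; note
# that each extra depth level triples the number of smallest triangles (3 ** depth in all).
def _get_mid_check(pa, pb):
    return (pa[0] + pb[0]) / 2, (pa[1] + pb[1]) / 2


assert _get_mid_check((-175, -125), (175, -125)) == (0.0, -125.0)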
| 183 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase_ : Optional[int] = logging.get_logger(__name__)
lowerCamelCase_ : Optional[int] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase_ : List[str] = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase_ : str = {
"""gpt2""": 10_24,
"""gpt2-medium""": 10_24,
"""gpt2-large""": 10_24,
"""gpt2-xl""": 10_24,
"""distilgpt2""": 10_24,
}
class _UpperCamelCase ( _A ):
'''simple docstring'''
__UpperCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
__UpperCamelCase : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__UpperCamelCase : List[Any] = ["""input_ids""", """attention_mask"""]
__UpperCamelCase : List[str] = GPTaTokenizer
def __init__( self : str , snake_case_ : Optional[int]=None , snake_case_ : List[Any]=None , snake_case_ : Tuple=None , snake_case_ : List[Any]="<|endoftext|>" , snake_case_ : Union[str, Any]="<|endoftext|>" , snake_case_ : Optional[Any]="<|endoftext|>" , snake_case_ : List[str]=False , **snake_case_ : Optional[int] , ):
super().__init__(
snake_case_ , snake_case_ , tokenizer_file=snake_case_ , unk_token=snake_case_ , bos_token=snake_case_ , eos_token=snake_case_ , add_prefix_space=snake_case_ , **snake_case_ , )
UpperCamelCase_: str = kwargs.pop("""add_bos_token""" , snake_case_ )
UpperCamelCase_: Optional[int] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , snake_case_ ) != add_prefix_space:
UpperCamelCase_: int = getattr(snake_case_ , pre_tok_state.pop("""type""" ) )
UpperCamelCase_: Tuple = add_prefix_space
UpperCamelCase_: List[Any] = pre_tok_class(**snake_case_ )
UpperCamelCase_: List[str] = add_prefix_space
def lowerCAmelCase__ ( self : Union[str, Any] , *snake_case_ : Optional[Any] , **snake_case_ : int ):
UpperCamelCase_: str = kwargs.get("""is_split_into_words""" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , *snake_case_ : Tuple , **snake_case_ : Optional[Any] ):
UpperCamelCase_: int = kwargs.get("""is_split_into_words""" , snake_case_ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*snake_case_ , **snake_case_ )
def lowerCAmelCase__ ( self : Optional[int] , snake_case_ : str , snake_case_ : Optional[str] = None ):
UpperCamelCase_: int = self._tokenizer.model.save(snake_case_ , name=snake_case_ )
return tuple(snake_case_ )
def lowerCAmelCase__ ( self : Tuple , snake_case_ : "Conversation" ):
UpperCamelCase_: Dict = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(snake_case_ , add_special_tokens=snake_case_ ) + [self.eos_token_id] )
if len(snake_case_ ) > self.model_max_length:
UpperCamelCase_: Any = input_ids[-self.model_max_length :]
return input_ids
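# Hedged sketch of the truncation rule in the conversation builder above: only the most
# recent `model_max_length` token ids are kept. The ids below are invented.
_model_max_length = 5
_input_ids = [10, 11, 12, 13, 14, 15, 16]
if len(_input_ids) > _model_max_length:
    _input_ids = _input_ids[-_model_max_length:]
assert _input_ids == [12, 13, 14, 15, 16]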
| 548 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowerCAmelCase__ ( self : Dict ):
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
UpperCamelCase_: Optional[int] = [[1, 2, 4], [1, 2, 3, 4]]
UpperCamelCase_: List[str] = DisjunctiveConstraint(snake_case_ )
self.assertTrue(isinstance(dc.token_ids , snake_case_ ) )
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def lowerCAmelCase__ ( self : Optional[Any] ):
        # We can't have constraints that are complete subsets of one another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
UpperCamelCase_: Dict = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(snake_case_ ):
DisjunctiveConstraint(snake_case_ ) # fails here
def lowerCAmelCase__ ( self : int ):
UpperCamelCase_: Dict = [[1, 2, 3], [1, 2, 4]]
UpperCamelCase_: Dict = DisjunctiveConstraint(snake_case_ )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: str = dc.update(1 )
UpperCamelCase_: Union[str, Any] = stepped is True and completed is False and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = dc.update(2 )
UpperCamelCase_: Tuple = stepped is True and completed is False and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Optional[int] = dc.update(3 )
UpperCamelCase_: Union[str, Any] = stepped is True and completed is True and reset is False
self.assertTrue(snake_case_ )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def lowerCAmelCase__ ( self : List[str] ):
UpperCamelCase_: Optional[int] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
UpperCamelCase_: Tuple = DisjunctiveConstraint(snake_case_ )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Dict = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Union[str, Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Any = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: Tuple = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
UpperCamelCase_, UpperCamelCase_, UpperCamelCase_: List[str] = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] )
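# A minimal, assumed sketch of the "no constraint may be a subset of another" rule that the
# second test above exercises; this is not the library's actual validation code.
def _has_nested_constraint(token_ids_list):
    for seq_a in token_ids_list:
        for seq_b in token_ids_list:
            # seq_a being a strict prefix of seq_b makes the pair ambiguous
            if seq_a is not seq_b and seq_b[: len(seq_a)] == seq_a:
                return True
    return False


assert _has_nested_constraint([[1, 2], [1, 2, 3, 4]])      # rejected by the test above
assert not _has_nested_constraint([[1, 2, 3], [1, 2, 4]])  # accepted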
| 548 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = '''facebook/bart-large-mnli'''
lowerCAmelCase = (
'''This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which '''
'''should be the text to classify, and `labels`, which should be the list of labels to use for classification. '''
'''It returns the most likely label in the list of provided `labels` for the input text.'''
)
lowerCAmelCase = '''text_classifier'''
lowerCAmelCase = AutoTokenizer
lowerCAmelCase = AutoModelForSequenceClassification
lowerCAmelCase = ['''text''', ['''text''']]
lowerCAmelCase = ['''text''']
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
super().setup()
__A : List[str] = self.model.config
__A : List[str] = -1
for idx, label in config.idalabel.items():
if label.lower().startswith('entail'):
__A : str = int(A__)
if self.entailment_id == -1:
raise ValueError('Could not determine the entailment ID from the model config, please pass it at init.')
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase , _UpperCAmelCase):
'''simple docstring'''
__A : Tuple = labels
return self.pre_processor(
[text] * len(A__) , [F'This example is {label}' for label in labels] , return_tensors='pt' , padding='max_length' , )
def SCREAMING_SNAKE_CASE ( self , _UpperCAmelCase):
'''simple docstring'''
__A : List[Any] = outputs.logits
__A : int = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
| 720 |
'''simple docstring'''
import torch
from diffusers import KDPMaDiscreteScheduler
from diffusers.utils import torch_device
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE (a__ ):
lowerCAmelCase = (KDPMaDiscreteScheduler,)
lowerCAmelCase = 10
def SCREAMING_SNAKE_CASE ( self , **_UpperCAmelCase):
'''simple docstring'''
__A : Optional[int] = {
'num_train_timesteps': 1100,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
}
config.update(**_UpperCAmelCase)
return config
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for beta_start, beta_end in zip([0.00001, 0.0001, 0.001] , [0.0002, 0.002, 0.02]):
self.check_over_configs(beta_start=_UpperCAmelCase , beta_end=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=_UpperCAmelCase)
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
__A : Optional[Any] = self.scheduler_classes[0]
__A : str = self.get_scheduler_config(prediction_type='v_prediction')
__A : Optional[Any] = scheduler_class(**_UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
__A : List[Any] = self.dummy_model()
__A : Optional[int] = self.dummy_sample_deter * scheduler.init_noise_sigma
__A : Any = sample.to(_UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
__A : Optional[Any] = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase)
__A : Any = model(_UpperCAmelCase , _UpperCAmelCase)
__A : Tuple = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = output.prev_sample
__A : Any = torch.sum(torch.abs(_UpperCAmelCase))
__A : str = torch.mean(torch.abs(_UpperCAmelCase))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 4.6_9_3_4e-0_7) < 1e-2
assert abs(result_mean.item() - 6.1_1_1_2e-1_0) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 4.6_9_3_4_2_8_6_5_0_1_7_0_9_7_2e-0_7) < 1e-2
assert abs(result_mean.item() - 0.0002) < 1e-3
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if torch_device == "mps":
return
__A : Optional[Any] = self.scheduler_classes[0]
__A : List[Any] = self.get_scheduler_config()
__A : List[Any] = scheduler_class(**_UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps)
__A : Any = self.dummy_model()
__A : int = self.dummy_sample_deter * scheduler.init_noise_sigma
__A : List[Any] = sample.to(_UpperCAmelCase)
for i, t in enumerate(scheduler.timesteps):
__A : Dict = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase)
__A : Dict = model(_UpperCAmelCase , _UpperCAmelCase)
__A : int = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Dict = output.prev_sample
__A : Tuple = torch.sum(torch.abs(_UpperCAmelCase))
__A : str = torch.mean(torch.abs(_UpperCAmelCase))
if torch_device in ["cpu", "mps"]:
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
def SCREAMING_SNAKE_CASE ( self):
'''simple docstring'''
if torch_device == "mps":
return
__A : List[str] = self.scheduler_classes[0]
__A : Optional[Any] = self.get_scheduler_config()
__A : List[Any] = scheduler_class(**_UpperCAmelCase)
scheduler.set_timesteps(self.num_inference_steps , device=_UpperCAmelCase)
__A : Optional[int] = self.dummy_model()
__A : str = self.dummy_sample_deter.to(_UpperCAmelCase) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
__A : str = scheduler.scale_model_input(_UpperCAmelCase , _UpperCAmelCase)
__A : List[Any] = model(_UpperCAmelCase , _UpperCAmelCase)
__A : int = scheduler.step(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase)
__A : Union[str, Any] = output.prev_sample
__A : int = torch.sum(torch.abs(_UpperCAmelCase))
__A : Dict = torch.mean(torch.abs(_UpperCAmelCase))
if str(_UpperCAmelCase).startswith('cpu'):
# The following sum varies between 148 and 156 on mps. Why?
assert abs(result_sum.item() - 20.4125) < 1e-2
assert abs(result_mean.item() - 0.0266) < 1e-3
else:
# CUDA
assert abs(result_sum.item() - 20.4125) < 1e-2
            assert abs(result_mean.item() - 0.0266) < 1e-3
| 338 | 0 |
import argparse
import glob
import logging
import os
from argparse import Namespace
from importlib import import_module
import numpy as np
import torch
from lightning_base import BaseTransformer, add_generic_args, generic_train
from seqeval.metrics import accuracy_score, fa_score, precision_score, recall_score
from torch.nn import CrossEntropyLoss
from torch.utils.data import DataLoader, TensorDataset
from utils_ner import TokenClassificationTask
__magic_name__ =logging.getLogger(__name__)
class _A ( __UpperCamelCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] ="token-classification"
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
if type(SCREAMING_SNAKE_CASE_ ) == dict:
UpperCamelCase__ = Namespace(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = import_module('''tasks''' )
try:
UpperCamelCase__ = getattr(SCREAMING_SNAKE_CASE_ , hparams.task_type )
UpperCamelCase__ = token_classification_task_clazz()
except AttributeError:
raise ValueError(
F"Task {hparams.task_type} needs to be defined as a TokenClassificationTask subclass in {module}. "
F"Available tasks classes are: {TokenClassificationTask.__subclasses__()}" )
UpperCamelCase__ = self.token_classification_task.get_labels(hparams.labels )
UpperCamelCase__ = CrossEntropyLoss().ignore_index
super().__init__(SCREAMING_SNAKE_CASE_ , len(self.labels ) , self.mode )
def _a (self , **SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
return self.model(**SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
'''simple docstring'''
UpperCamelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = outputs[0]
# tensorboard_logs = {"loss": loss, "rate": self.lr_scheduler.get_last_lr()[-1]}
return {"loss": loss}
def _a (self ) -> int:
'''simple docstring'''
UpperCamelCase__ = self.hparams
for mode in ["train", "dev", "test"]:
UpperCamelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
if os.path.exists(SCREAMING_SNAKE_CASE_ ) and not args.overwrite_cache:
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE_ )
else:
logger.info('''Creating features from dataset file at %s''' , args.data_dir )
UpperCamelCase__ = self.token_classification_task.read_examples_from_file(args.data_dir , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = self.token_classification_task.convert_examples_to_features(
SCREAMING_SNAKE_CASE_ , self.labels , args.max_seq_length , self.tokenizer , cls_token_at_end=bool(self.config.model_type in ['''xlnet'''] ) , cls_token=self.tokenizer.cls_token , cls_token_segment_id=2 if self.config.model_type in ['''xlnet'''] else 0 , sep_token=self.tokenizer.sep_token , sep_token_extra=SCREAMING_SNAKE_CASE_ , pad_on_left=bool(self.config.model_type in ['''xlnet'''] ) , pad_token=self.tokenizer.pad_token_id , pad_token_segment_id=self.tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info('''Saving features into cached file %s''' , SCREAMING_SNAKE_CASE_ )
torch.save(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = False ) -> DataLoader:
'''simple docstring'''
UpperCamelCase__ = self._feature_file(SCREAMING_SNAKE_CASE_ )
logger.info('''Loading features from cached file %s''' , SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.load(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = torch.tensor([f.input_ids for f in features] , dtype=torch.long )
UpperCamelCase__ = torch.tensor([f.attention_mask for f in features] , dtype=torch.long )
if features[0].token_type_ids is not None:
UpperCamelCase__ = torch.tensor([f.token_type_ids for f in features] , dtype=torch.long )
else:
UpperCamelCase__ = torch.tensor([0 for f in features] , dtype=torch.long )
        # HACK (we will not need this for much longer)
UpperCamelCase__ = torch.tensor([f.label_ids for f in features] , dtype=torch.long )
return DataLoader(
TensorDataset(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) , batch_size=SCREAMING_SNAKE_CASE_ )
def _a (self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
        '''Compute validation'''
UpperCamelCase__ = {'''input_ids''': batch[0], '''attention_mask''': batch[1], '''labels''': batch[3]}
if self.config.model_type != "distilbert":
UpperCamelCase__ = (
batch[2] if self.config.model_type in ['''bert''', '''xlnet'''] else None
            ) # XLM and RoBERTa don't use token_type_ids
UpperCamelCase__ = self(**SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ , UpperCamelCase__ = outputs[:2]
UpperCamelCase__ = logits.detach().cpu().numpy()
UpperCamelCase__ = inputs['''labels'''].detach().cpu().numpy()
return {"val_loss": tmp_eval_loss.detach().cpu(), "pred": preds, "target": out_label_ids}
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Union[str, Any]:
'''simple docstring'''
UpperCamelCase__ = torch.stack([x['''val_loss'''] for x in outputs] ).mean()
UpperCamelCase__ = np.concatenate([x['''pred'''] for x in outputs] , axis=0 )
UpperCamelCase__ = np.argmax(SCREAMING_SNAKE_CASE_ , axis=2 )
UpperCamelCase__ = np.concatenate([x['''target'''] for x in outputs] , axis=0 )
UpperCamelCase__ = dict(enumerate(self.labels ) )
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
UpperCamelCase__ = [[] for _ in range(out_label_ids.shape[0] )]
for i in range(out_label_ids.shape[0] ):
for j in range(out_label_ids.shape[1] ):
if out_label_ids[i, j] != self.pad_token_label_id:
out_label_list[i].append(label_map[out_label_ids[i][j]] )
preds_list[i].append(label_map[preds[i][j]] )
UpperCamelCase__ = {
'''val_loss''': val_loss_mean,
'''accuracy_score''': accuracy_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
'''precision''': precision_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
'''recall''': recall_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
'''f1''': fa_score(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ),
}
UpperCamelCase__ = dict(results.items() )
UpperCamelCase__ = results
return ret, preds_list, out_label_list
def _a (self , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
UpperCamelCase__ = ret['''log''']
return {"val_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
def _a (self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = self._eval_end(SCREAMING_SNAKE_CASE_ )
# Converting to the dict required by pl
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master/\
# pytorch_lightning/trainer/logging.py#L139
UpperCamelCase__ = ret['''log''']
# `val_loss` is the key returned by `self._eval_end()` but actually refers to `test_loss`
return {"avg_test_loss": logs["val_loss"], "log": logs, "progress_bar": logs}
@staticmethod
def _a (SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
'''simple docstring'''
BaseTransformer.add_model_specific_args(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'''--task_type''' , default='''NER''' , type=SCREAMING_SNAKE_CASE_ , help='''Task type to fine tune in training (e.g. NER, POS, etc)''' )
parser.add_argument(
'''--max_seq_length''' , default=128 , type=SCREAMING_SNAKE_CASE_ , help=(
'''The maximum total input sequence length after tokenization. Sequences longer '''
'''than this will be truncated, sequences shorter will be padded.'''
) , )
parser.add_argument(
'''--labels''' , default='''''' , type=SCREAMING_SNAKE_CASE_ , help='''Path to a file containing all labels. If not specified, CoNLL-2003 labels are used.''' , )
parser.add_argument(
            '''--gpus''' , default=0 , type=SCREAMING_SNAKE_CASE_ , help='''The number of GPUs allocated for this; it defaults to 0, meaning none''' , )
parser.add_argument(
'''--overwrite_cache''' , action='''store_true''' , help='''Overwrite the cached training and evaluation sets''' )
return parser
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
add_generic_args(parser, os.getcwd())
__magic_name__ =NERTransformer.add_model_specific_args(parser, os.getcwd())
__magic_name__ =parser.parse_args()
__magic_name__ =NERTransformer(args)
__magic_name__ =generic_train(model, args)
if args.do_predict:
# See https://github.com/huggingface/transformers/issues/3159
# pl use this default format to create a checkpoint:
# https://github.com/PyTorchLightning/pytorch-lightning/blob/master\
# /pytorch_lightning/callbacks/model_checkpoint.py#L322
__magic_name__ =sorted(glob.glob(os.path.join(args.output_dir, '''checkpoint-epoch=*.ckpt'''), recursive=True))
__magic_name__ =model.load_from_checkpoint(checkpoints[-1])
trainer.test(model)
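# Hedged, framework-free illustration of the label alignment in `_eval_end` above:
# positions whose gold id equals the pad label id are skipped when rebuilding string
# labels. All ids and labels below are invented.
_pad_token_label_id = -100
_label_map = {0: "O", 1: "B-PER"}
_out_label_ids = [[0, 1, _pad_token_label_id]]
_preds = [[0, 0, 1]]
_out_label_list, _preds_list = [[]], [[]]
for _i in range(len(_out_label_ids)):
    for _j in range(len(_out_label_ids[_i])):
        if _out_label_ids[_i][_j] != _pad_token_label_id:
            _out_label_list[_i].append(_label_map[_out_label_ids[_i][_j]])
            _preds_list[_i].append(_label_map[_preds[_i][_j]])
assert _out_label_list == [["O", "B-PER"]] and _preds_list == [["O", "O"]]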
| 415 |
import argparse
import json
import os
import fairseq
import torch
from torch import nn
from transformers import (
SpeechaTextaConfig,
SpeechaTextaForCausalLM,
SpeechaTextaTokenizer,
SpeechEncoderDecoderConfig,
SpeechEncoderDecoderModel,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaModel,
logging,
)
logging.set_verbosity_info()
__magic_name__ =logging.get_logger(__name__)
__magic_name__ ={
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
__magic_name__ =[
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def __UpperCamelCase ( A , A , A , A , A ):
for attribute in key.split('''.''' ):
UpperCamelCase__ = getattr(A , A )
if weight_type is not None:
UpperCamelCase__ = getattr(A , A ).shape
else:
UpperCamelCase__ = hf_pointer.shape
assert hf_shape == value.shape, (
f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
f" {value.shape} for {full_name}"
)
if weight_type == "weight":
UpperCamelCase__ = value
elif weight_type == "weight_g":
UpperCamelCase__ = value
elif weight_type == "weight_v":
UpperCamelCase__ = value
elif weight_type == "bias":
UpperCamelCase__ = value
else:
UpperCamelCase__ = value
logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = []
UpperCamelCase__ = fairseq_model.state_dict()
UpperCamelCase__ = hf_model.feature_extractor
    # if the encoder has a different dim than the decoder -> use proj_weight
UpperCamelCase__ = None
for name, value in fairseq_dict.items():
UpperCamelCase__ = False
if "conv_layers" in name:
load_conv_layer(
A , A , A , A , hf_model.config.feat_extract_norm == '''group''' , )
UpperCamelCase__ = True
elif name.split('''.''' )[0] == "proj":
UpperCamelCase__ = fairseq_model.proj
UpperCamelCase__ = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
UpperCamelCase__ = True
if "*" in mapped_key:
UpperCamelCase__ = name.split(A )[0].split('''.''' )[-2]
UpperCamelCase__ = mapped_key.replace('''*''' , A )
if "weight_g" in name:
UpperCamelCase__ = '''weight_g'''
elif "weight_v" in name:
UpperCamelCase__ = '''weight_v'''
elif "bias" in name:
UpperCamelCase__ = '''bias'''
elif "weight" in name:
UpperCamelCase__ = '''weight'''
else:
UpperCamelCase__ = None
set_recursively(A , A , A , A , A )
continue
if not is_used:
unused_weights.append(A )
logger.warning(f"Unused weights: {unused_weights}" )
return proj_weight
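# Illustrative check (invented names) of the "*" substitution used with MAPPING above:
# the layer index is recovered from the fairseq parameter name and spliced into the HF key.
_name = "encoder.layers.3.self_attn.k_proj.weight"
_key = "self_attn.k_proj"
_mapped_key = "encoder.layers.*.attention.k_proj"
_layer_index = _name.split(_key)[0].split(".")[-2]
assert _mapped_key.replace("*", _layer_index) == "encoder.layers.3.attention.k_proj"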
def __UpperCamelCase ( A , A , A , A , A ):
UpperCamelCase__ = full_name.split('''conv_layers.''' )[-1]
UpperCamelCase__ = name.split('''.''' )
UpperCamelCase__ = int(items[0] )
UpperCamelCase__ = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
f"{full_name} has size {value.shape}, but"
f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
UpperCamelCase__ = value
logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(A )
def __UpperCamelCase ( A ):
UpperCamelCase__ , UpperCamelCase__ = emb.weight.shape
UpperCamelCase__ = nn.Linear(A , A , bias=A )
UpperCamelCase__ = emb.weight.data
return lin_layer
def __UpperCamelCase ( A ):
with open(A , '''r''' , encoding='''utf-8''' ) as f:
UpperCamelCase__ = f.readlines()
UpperCamelCase__ = [line.split(''' ''' )[0] for line in lines]
UpperCamelCase__ = len(A )
UpperCamelCase__ = {
'''<s>''': 0,
'''<pad>''': 1,
'''</s>''': 2,
'''<unk>''': 3,
}
vocab_dict.update(dict(zip(A , range(4 , num_words + 4 ) ) ) )
return vocab_dict
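# Illustrative example of the vocab construction above: a fairseq dict file has one
# "<token> <count>" pair per line, and tokens are appended after the four special tokens.
# The two-line dict below is invented.
_lines = ["hello 42", "world 7"]
_words = [line.split(" ")[0] for line in _lines]
_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
_vocab.update(dict(zip(_words, range(4, len(_words) + 4))))
assert _vocab["hello"] == 4 and _vocab["world"] == 5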
@torch.no_grad()
def __UpperCamelCase ( A , A , A , A , A , A , A , ):
UpperCamelCase__ = WavaVecaConfig.from_pretrained(A )
UpperCamelCase__ = SpeechaTextaConfig.from_pretrained(
A , vocab_size=A , decoder_layers=A , do_stable_layer_norm=A )
UpperCamelCase__ = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=A , return_attention_mask=A , )
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
UpperCamelCase__ = model[0].eval()
# set weights for wav2vec2 encoder
UpperCamelCase__ = WavaVecaModel(A )
UpperCamelCase__ = recursively_load_weights_wavaveca(model.encoder , A )
UpperCamelCase__ = SpeechaTextaForCausalLM(A )
UpperCamelCase__ , UpperCamelCase__ = hf_decoder.model.decoder.load_state_dict(model.decoder.state_dict() , strict=A )
# set output linear layer
unexpected_keys.remove('''embed_out''' )
UpperCamelCase__ = nn.Parameter(model.decoder.embed_out.detach() )
    # the layer norm is initialized to the identity matrix, so leaving it as-is is fine
logger.warning(f"The following keys are missing when loading the decoder weights: {missing_keys}" )
logger.warning(f"The following keys are unexpected when loading the decoder weights: {unexpected_keys}" )
UpperCamelCase__ = SpeechEncoderDecoderModel(encoder=A , decoder=A )
UpperCamelCase__ = False
# add projection layer
UpperCamelCase__ = nn.Parameter(projection_layer.weight )
UpperCamelCase__ = nn.Parameter(projection_layer.bias )
UpperCamelCase__ = create_vocab_dict(A )
with open(os.path.join(A , '''vocab.json''' ) , '''w''' ) as fp:
json.dump(A , A )
UpperCamelCase__ = SpeechaTextaTokenizer(os.path.join(A , '''vocab.json''' ) )
tokenizer.save_pretrained(A )
UpperCamelCase__ = hf_wavavec.config.to_dict()
UpperCamelCase__ = tokenizer.pad_token_id
UpperCamelCase__ = tokenizer.bos_token_id
UpperCamelCase__ = tokenizer.eos_token_id
UpperCamelCase__ = '''speech_to_text_2'''
UpperCamelCase__ = '''wav2vec2'''
UpperCamelCase__ = SpeechEncoderDecoderConfig.from_dict(A )
hf_wavavec.save_pretrained(A )
feature_extractor.save_pretrained(A )
if __name__ == "__main__":
__magic_name__ =argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument(
'''--encoder_config_path''',
default='''facebook/wav2vec2-large-lv60''',
type=str,
help='''Path to hf encoder wav2vec2 checkpoint config''',
)
parser.add_argument(
'''--decoder_config_path''',
default='''facebook/s2t-small-mustc-en-fr-st''',
type=str,
help='''Path to hf decoder s2t checkpoint config''',
)
parser.add_argument('''--vocab_size''', default=10224, type=int, help='''Vocab size of decoder''')
parser.add_argument('''--num_decoder_layers''', default=7, type=int, help='''Number of decoder layers''')
__magic_name__ =parser.parse_args()
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.dict_path,
encoder_config_path=args.encoder_config_path,
decoder_config_path=args.decoder_config_path,
vocab_size=args.vocab_size,
num_decoder_layers=args.num_decoder_layers,
)
| 415 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_lowerCamelCase ={
"configuration_jukebox": [
"JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP",
"JukeboxConfig",
"JukeboxPriorConfig",
"JukeboxVQVAEConfig",
],
"tokenization_jukebox": ["JukeboxTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST",
"JukeboxModel",
"JukeboxPreTrainedModel",
"JukeboxVQVAE",
"JukeboxPrior",
]
if TYPE_CHECKING:
from .configuration_jukebox import (
JUKEBOX_PRETRAINED_CONFIG_ARCHIVE_MAP,
JukeboxConfig,
JukeboxPriorConfig,
JukeboxVQVAEConfig,
)
from .tokenization_jukebox import JukeboxTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_jukebox import (
JUKEBOX_PRETRAINED_MODEL_ARCHIVE_LIST,
JukeboxModel,
JukeboxPreTrainedModel,
JukeboxPrior,
JukeboxVQVAE,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
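# A toy, self-contained stand-in (assumed semantics, not transformers' actual _LazyModule)
# showing the pattern used above: attribute access triggers the real import on first use.
import importlib
import types


class _ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        # only called when normal attribute lookup fails, i.e. on first access
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)


_lazy_math = _ToyLazyModule("lazy_math", {"math": ["sqrt"]})
assert _lazy_math.sqrt(9) == 3.0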
| 717 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_lowerCamelCase ={
"configuration_blip": [
"BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
"BlipConfig",
"BlipTextConfig",
"BlipVisionConfig",
],
"processing_blip": ["BlipProcessor"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =["BlipImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"BlipModel",
"BlipPreTrainedModel",
"BlipForConditionalGeneration",
"BlipForQuestionAnswering",
"BlipVisionModel",
"BlipTextModel",
"BlipForImageTextRetrieval",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase =[
"TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFBlipModel",
"TFBlipPreTrainedModel",
"TFBlipForConditionalGeneration",
"TFBlipForQuestionAnswering",
"TFBlipVisionModel",
"TFBlipTextModel",
"TFBlipForImageTextRetrieval",
]
if TYPE_CHECKING:
from .configuration_blip import BLIP_PRETRAINED_CONFIG_ARCHIVE_MAP, BlipConfig, BlipTextConfig, BlipVisionConfig
from .processing_blip import BlipProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_blip import BlipImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blip import (
BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
BlipModel,
BlipPreTrainedModel,
BlipTextModel,
BlipVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blip import (
TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBlipForConditionalGeneration,
TFBlipForImageTextRetrieval,
TFBlipForQuestionAnswering,
TFBlipModel,
TFBlipPreTrainedModel,
TFBlipTextModel,
TFBlipVisionModel,
)
else:
import sys
_lowerCamelCase =_LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowercase_ = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase_ = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
lowercase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 291 |
"""simple docstring"""
class lowerCAmelCase__ :
def __init__( self : List[Any] , _lowerCamelCase : str = "" , _lowerCamelCase : bool = False ):
# Mapping from the first character of the prefix of the node
_snake_case = {}
# A node will be a leaf if the tree contains its word
_snake_case = is_leaf
_snake_case = prefix
def lowercase ( self : List[Any] , _lowerCamelCase : str ):
_snake_case = 0
for q, w in zip(self.prefix , _lowerCamelCase ):
if q != w:
break
x += 1
return self.prefix[:x], self.prefix[x:], word[x:]
def lowercase ( self : Optional[Any] , _lowerCamelCase : list[str] ):
for word in words:
self.insert(_lowerCamelCase )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str ):
        # Case 1: The word is exactly the node's prefix
# Solution: We set the current node as leaf
if self.prefix == word:
_snake_case = True
        # Case 2: The node has no edge starting with the word's first character
# Solution: We create an edge from the current node to a new one
# containing the word
elif word[0] not in self.nodes:
_snake_case = RadixNode(prefix=_lowerCamelCase , is_leaf=_lowerCamelCase )
else:
_snake_case = self.nodes[word[0]]
_snake_case , _snake_case , _snake_case = incoming_node.match(
_lowerCamelCase )
# Case 3: The node prefix is equal to the matching
# Solution: We insert remaining word on the next node
if remaining_prefix == "":
self.nodes[matching_string[0]].insert(_lowerCamelCase )
# Case 4: The word is greater equal to the matching
# Solution: Create a node in between both nodes, change
# prefixes and add the new node for the remaining word
else:
_snake_case = remaining_prefix
_snake_case = self.nodes[matching_string[0]]
_snake_case = RadixNode(_lowerCamelCase , _lowerCamelCase )
_snake_case = aux_node
if remaining_word == "":
_snake_case = True
else:
self.nodes[matching_string[0]].insert(_lowerCamelCase )
def lowercase ( self : List[Any] , _lowerCamelCase : str ):
_snake_case = self.nodes.get(word[0] , _lowerCamelCase )
if not incoming_node:
return False
else:
_snake_case , _snake_case , _snake_case = incoming_node.match(
_lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# This applies when the word and the prefix are equal
elif remaining_word == "":
return incoming_node.is_leaf
# We have word remaining so we check the next node
else:
return incoming_node.find(_lowerCamelCase )
def lowercase ( self : Union[str, Any] , _lowerCamelCase : str ):
_snake_case = self.nodes.get(word[0] , _lowerCamelCase )
if not incoming_node:
return False
else:
_snake_case , _snake_case , _snake_case = incoming_node.match(
_lowerCamelCase )
# If there is remaining prefix, the word can't be on the tree
if remaining_prefix != "":
return False
# We have word remaining so we check the next node
elif remaining_word != "":
return incoming_node.delete(_lowerCamelCase )
else:
# If it is not a leaf, we don't have to delete
if not incoming_node.is_leaf:
return False
else:
# We delete the nodes if no edges go from it
if len(incoming_node.nodes ) == 0:
del self.nodes[word[0]]
# We merge the current node with its only child
if len(self.nodes ) == 1 and not self.is_leaf:
_snake_case = list(self.nodes.values() )[0]
_snake_case = merging_node.is_leaf
self.prefix += merging_node.prefix
_snake_case = merging_node.nodes
# If there is more than 1 edge, we just mark it as non-leaf
elif len(incoming_node.nodes ) > 1:
_snake_case = False
# If there is 1 edge, we merge it with its child
else:
_snake_case = list(incoming_node.nodes.values() )[0]
_snake_case = merging_node.is_leaf
incoming_node.prefix += merging_node.prefix
_snake_case = merging_node.nodes
return True
def lowercase ( self : List[Any] , _lowerCamelCase : int = 0 ):
if self.prefix != "":
print('''-''' * height , self.prefix , ''' (leaf)''' if self.is_leaf else '''''' )
for value in self.nodes.values():
value.print_tree(height + 1 )
def _UpperCAmelCase ( ) -> bool:
_snake_case = '''banana bananas bandana band apple all beast'''.split()
_snake_case = RadixNode()
root.insert_many(__lowerCamelCase )
assert all(root.find(__lowerCamelCase ) for word in words )
assert not root.find('''bandanas''' )
assert not root.find('''apps''' )
root.delete('''all''' )
assert not root.find('''all''' )
root.delete('''banana''' )
assert not root.find('''banana''' )
assert root.find('''bananas''' )
return True
def _UpperCAmelCase ( ) -> None:
assert test_trie()
def _UpperCAmelCase ( ) -> None:
_snake_case = RadixNode()
_snake_case = '''banana bananas bandanas bandana band apple all beast'''.split()
root.insert_many(__lowerCamelCase )
print('''Words:''' , __lowerCamelCase )
print('''Tree:''' )
root.print_tree()
if __name__ == "__main__":
main()
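# Editor's illustration (not part of the original file): insert overlapping words,
# then delete one and confirm the shorter word survives the prefix merge.
demo_root = RadixNode()
demo_root.insert_many(["test", "tester", "team"])
print(demo_root.find("tester"))  # True
demo_root.delete("tester")
print(demo_root.find("tester"))  # False
print(demo_root.find("test"))    # True — "test" survives deleting its extension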
| 224 | 0 |
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            ./examples/pytorch/text-classification/run_glue.py
            --num_cores=8
            ./examples/pytorch/text-classification/run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --do_train
            --do_eval
            --debug tpu_metrics_debug
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --max_steps=10
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()

            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)

    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
from collections.abc import Callable
import numpy as np
def euler_modified(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.array:
    """Solve an ODE with the modified Euler (Heun / explicit trapezoidal) method."""
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0

    for k in range(n):
        y_predict = y[k] + step_size * ode_func(x, y[k])
        y[k + 1] = y[k] + (
            (step_size / 2) * (ode_func(x, y[k]) + ode_func(x + step_size, y_predict))
        )
        x += step_size

    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 182 | 0 |
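# Editor's illustration (not part of the original file): dy/dx = y with y(0) = 1
# has exact solution e**x, so y(1) should be close to e ≈ 2.71828.
y = euler_modified(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
print(y[-1])  # ~2.7183; the method is second-order accurate in step_size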
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeq2SeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
logger = logging.get_logger(__name__)

BLENDERBOT_SMALL_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json",
    # See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}


class BlenderbotSmallConfig(PretrainedConfig):
    model_type = "blenderbot-small"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
    def __init__(
        self,
        vocab_size=50265,
        max_position_embeddings=512,
        encoder_layers=8,
        encoder_ffn_dim=2048,
        encoder_attention_heads=16,
        decoder_layers=8,
        decoder_ffn_dim=2048,
        decoder_attention_heads=16,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        use_cache=True,
        is_encoder_decoder=True,
        activation_function="gelu",
        d_model=512,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        decoder_start_token_id=1,
        scale_embedding=False,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        forced_eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            decoder_start_token_id=decoder_start_token_id,
            forced_eos_token_id=forced_eos_token_id,
            **kwargs,
        )
class BlenderbotSmallOnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                common_inputs["decoder_input_ids"] = {0: "batch"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
            else:
                common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
                common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

            if self.use_past:
                self.fill_with_past_key_values_(common_inputs, direction="inputs")
        elif self.task == "causal-lm":
            # TODO: figure this case out.
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                ]
            )
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_inputs[f"past_key_values.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_inputs[f"past_key_values.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        else:
            common_inputs = OrderedDict(
                [
                    ("input_ids", {0: "batch", 1: "encoder_sequence"}),
                    ("attention_mask", {0: "batch", 1: "encoder_sequence"}),
                    ("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
                    ("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
                ]
            )

        return common_inputs

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task in ["default", "seq2seq-lm"]:
            common_outputs = super().outputs
        else:
            common_outputs = super(OnnxConfigWithPast, self).outputs
            if self.use_past:
                num_encoder_layers, _ = self.num_layers
                for i in range(num_encoder_layers):
                    common_outputs[f"present.{i}.key"] = {0: "batch", 2: "past_sequence + sequence"}
                    common_outputs[f"present.{i}.value"] = {0: "batch", 2: "past_sequence + sequence"}
        return common_outputs

    def _generate_dummy_inputs_for_default_and_seq2seq_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        encoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        # Generate decoder inputs
        decoder_seq_length = seq_length if not self.use_past else 1
        decoder_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, decoder_seq_length, is_pair, framework
        )
        decoder_inputs = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
        common_inputs = dict(**encoder_inputs, **decoder_inputs)

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, encoder_seq_length = common_inputs["input_ids"].shape
            decoder_seq_length = common_inputs["decoder_input_ids"].shape[1]
            num_encoder_attention_heads, num_decoder_attention_heads = self.num_attention_heads
            encoder_shape = (
                batch,
                num_encoder_attention_heads,
                encoder_seq_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )
            decoder_past_length = decoder_seq_length + 3
            decoder_shape = (
                batch,
                num_decoder_attention_heads,
                decoder_past_length,
                self._config.hidden_size // num_decoder_attention_heads,
            )

            common_inputs["decoder_attention_mask"] = torch.cat(
                [common_inputs["decoder_attention_mask"], torch.ones(batch, decoder_past_length)], dim=1
            )

            common_inputs["past_key_values"] = []
            # If the number of encoder and decoder layers are present in the model configuration, both are considered
            num_encoder_layers, num_decoder_layers = self.num_layers
            min_num_layers = min(num_encoder_layers, num_decoder_layers)
            max_num_layers = max(num_encoder_layers, num_decoder_layers) - min_num_layers
            remaining_side_name = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"

            for _ in range(min_num_layers):
                common_inputs["past_key_values"].append(
                    (
                        torch.zeros(decoder_shape),
                        torch.zeros(decoder_shape),
                        torch.zeros(encoder_shape),
                        torch.zeros(encoder_shape),
                    )
                )
            # TODO: test this.
            shape = encoder_shape if remaining_side_name == "encoder" else decoder_shape
            for _ in range(min_num_layers, max_num_layers):
                common_inputs["past_key_values"].append((torch.zeros(shape), torch.zeros(shape)))
        return common_inputs

    def _generate_dummy_inputs_for_causal_lm(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
            tokenizer, batch_size, seq_length, is_pair, framework
        )

        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch
            batch, seqlen = common_inputs["input_ids"].shape
            # Not using the same length for past_key_values
            past_key_values_length = seqlen + 2
            num_encoder_layers, _ = self.num_layers
            num_encoder_attention_heads, _ = self.num_attention_heads
            past_shape = (
                batch,
                num_encoder_attention_heads,
                past_key_values_length,
                self._config.hidden_size // num_encoder_attention_heads,
            )

            mask_dtype = common_inputs["attention_mask"].dtype
            common_inputs["attention_mask"] = torch.cat(
                [common_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )
            common_inputs["past_key_values"] = [
                (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(num_encoder_layers)
            ]
        return common_inputs

    def _generate_dummy_inputs_for_sequence_classification_and_question_answering(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        batch_size = compute_effective_axis_dimension(
            batch_size, fixed_dimension=OnnxConfig.default_fixed_batch, num_token_to_add=0
        )

        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = tokenizer.num_special_tokens_to_add(is_pair)
        seq_length = compute_effective_axis_dimension(
            seq_length, fixed_dimension=OnnxConfig.default_fixed_sequence, num_token_to_add=token_to_add
        )

        # Generate dummy inputs according to compute batch and sequence
        dummy_input = [" ".join([tokenizer.unk_token]) * seq_length] * batch_size
        common_inputs = dict(tokenizer(dummy_input, return_tensors=framework))
        return common_inputs

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        if self.task in ["default", "seq2seq-lm"]:
            common_inputs = self._generate_dummy_inputs_for_default_and_seq2seq_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        elif self.task == "causal-lm":
            common_inputs = self._generate_dummy_inputs_for_causal_lm(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )
        else:
            common_inputs = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
                tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
            )

        return common_inputs

    def _flatten_past_key_values_(self, flattened_output, name, idx, t):
        if self.task in ["default", "seq2seq-lm"]:
            flattened_output = super()._flatten_past_key_values_(flattened_output, name, idx, t)
        else:
            flattened_output = super(OnnxSeq2SeqConfigWithPast, self)._flatten_past_key_values_(
                flattened_output, name, idx, t
            )
| 197 |
from math import log
from scipy.constants import Boltzmann, physical_constants
T = 300  # TEMPERATURE (unit = K)


def builtin_voltage(donor_conc: float, acceptor_conc: float, intrinsic_conc: float) -> float:
    """Compute the built-in voltage of a p-n junction from its carrier concentrations."""
    if donor_conc <= 0:
        raise ValueError("Donor concentration should be positive")
    elif acceptor_conc <= 0:
        raise ValueError("Acceptor concentration should be positive")
    elif intrinsic_conc <= 0:
        raise ValueError("Intrinsic concentration should be positive")
    elif donor_conc <= intrinsic_conc:
        raise ValueError("Donor concentration should be greater than intrinsic concentration")
    elif acceptor_conc <= intrinsic_conc:
        raise ValueError("Acceptor concentration should be greater than intrinsic concentration")
    else:
        return (
            Boltzmann
            * T
            * log((donor_conc * acceptor_conc) / intrinsic_conc**2)
            / physical_constants["electron volt"][0]
        )
if __name__ == "__main__":
import doctest
doctest.testmod()
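# Editor's illustration (not part of the original file): silicon-like doping values.
# V_bi = (kT/q) * ln(Nd * Na / ni**2) ≈ 0.81 V at T = 300 K for these inputs.
print(builtin_voltage(donor_conc=1e17, acceptor_conc=1e17, intrinsic_conc=1.5e10))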
| 598 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis_float32 (there's also the fix_lavis branch)
# also note: to convert Vicuna checkpoints, we had to include /home/niels/python_projects/checkpoints/FastChat/vicuna-7b in lavis/configs/models/blip2/blip2_instruct_vicuna7b.yaml
# same for Vicuna-13b
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    BlipImageProcessor,
    InstructBlipConfig,
    InstructBlipForConditionalGeneration,
    InstructBlipProcessor,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    LlamaConfig,
    LlamaTokenizerFast,
    T5Config,
    T5TokenizerFast,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://raw.githubusercontent.com/salesforce/LAVIS/main/docs/_static/Confusing-Pictures.jpg"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(('visual_encoder.cls_token', 'vision_model.embeddings.class_embedding') )
rename_keys.append(('visual_encoder.pos_embed', 'vision_model.embeddings.position_embedding') )
rename_keys.append(('visual_encoder.patch_embed.proj.weight', 'vision_model.embeddings.patch_embedding.weight') )
rename_keys.append(('visual_encoder.patch_embed.proj.bias', 'vision_model.embeddings.patch_embedding.bias') )
rename_keys.append(('ln_vision.weight', 'vision_model.post_layernorm.weight') )
rename_keys.append(('ln_vision.bias', 'vision_model.post_layernorm.bias') )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.weight', 'qformer.embeddings.layernorm.weight') )
rename_keys.append(('Qformer.bert.embeddings.LayerNorm.bias', 'qformer.embeddings.layernorm.bias') )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(v_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name):
    image_size = 364 if "coco" in model_name else 224
    vision_config = InstructBlipVisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "vicuna-7b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-7b-hf", vocab_size=32001).to_dict()
    elif "vicuna-13b" in model_name:
        text_config = LlamaConfig.from_pretrained("decapoda-research/llama-13b-hf", vocab_size=32001).to_dict()
    else:
        raise ValueError("Model name not supported")

    # the authors add one special "[DEC]" token to the vocab of Q-Former, hence vocab size = 30522 + 1
    qformer_config = InstructBlipQFormerConfig(vocab_size=30523).to_dict()
    config = InstructBlipConfig(vision_config=vision_config, text_config=text_config, qformer_config=qformer_config)

    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    qformer_tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased", truncation_side="left")
    qformer_tokenizer.add_special_tokens({"bos_token": "[DEC]"})

    if "t5" in model_name:
        tokenizer = T5TokenizerFast.from_pretrained("google/flan-t5-xl", truncation_side="left")
    elif "vicuna" in model_name:
        # the following was used in the original implementation:
        # tokenizer = LlamaTokenizer.from_pretrained("huggyllama/llama-7b", use_fast=False, truncation_side="left")
        # tokenizer.add_special_tokens({"pad_token": "[PAD]"})
        # tokenizer.add_special_tokens({"bos_token": "</s>"})
        # tokenizer.add_special_tokens({"eos_token": "</s>"})
        # tokenizer.add_special_tokens({"unk_token": "</s>"})
        tokenizer = LlamaTokenizerFast.from_pretrained(
            "huggyllama/llama-7b", truncation_side="left", bos_token="</s>", unk_token="</s>"
        )
        tokenizer.add_special_tokens({"pad_token": "[PAD]"})

    config, image_size = get_blip2_config(model_name)
    hf_model = InstructBlipForConditionalGeneration(config).eval()

    model_name_to_original = {
        "instructblip-vicuna-7b": ("blip2_vicuna_instruct", "vicuna7b"),
        "instructblip-vicuna-13b": ("blip2_vicuna_instruct", "vicuna13b"),
        "instructblip-flan-t5-xl": ("blip2_t5_instruct", "flant5xl"),
        "instructblip-flan-t5-xxl": ("blip2_t5_instruct", "flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    hf_model_device = "cuda:1" if torch.cuda.is_available() else "cpu"
    lavis_device = "cuda:2" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=lavis_device
    )
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "llm_proj" in key:
            key = key.replace("llm_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("llm_model"):
            key = key.replace("llm_model", "language_model")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    # note: weights get loaded in torch.float32 by default
    hf_model.load_state_dict(state_dict, strict=True)

    image = load_demo_image()
    prompt = "What is unusual about this image?"

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD
    )
    processor = InstructBlipProcessor(
        image_processor=image_processor,
        tokenizer=tokenizer,
        qformer_tokenizer=qformer_tokenizer,
    )
    inputs = processor(images=image, text=prompt, return_tensors="pt").to(hf_model_device)

    # make sure processor creates exact same pixel values
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(lavis_device)
    pixel_values = inputs.pixel_values
    assert torch.allclose(original_pixel_values.to(pixel_values.device), pixel_values)

    original_model.to(lavis_device)
    hf_model.to(hf_model_device)
    with torch.no_grad():
        if "vicuna" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [prompt]}).logits
            logits = hf_model(**inputs).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": [prompt], "text_output": ["\n"]}
            ).logits
            label_input_ids = tokenizer("\n", return_tensors="pt").input_ids.to(hf_model_device)
            labels = label_input_ids.masked_fill(label_input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(**inputs, labels=labels).logits

    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    assert original_logits.shape == logits.shape
    atol = 1e-4 if "vicuna" in model_name else 1e-5
    assert torch.allclose(original_logits.to(logits.device), logits, atol=atol)
    print("Looks ok!")

    print("Generating with original model...")
    original_outputs = original_model.generate({"image": original_pixel_values, "prompt": prompt}, num_beams=5)

    # important: we need to cast the weights of the HF model to the appropriate type
    print("Generating with HF model...")
    outputs = hf_model.generate(
        **inputs,
        do_sample=False,
        num_beams=5,
        max_length=256,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.5,
        length_penalty=1.0,
        temperature=1,
    )
    if "vicuna" in model_name:
        # convert output id 0 to 2 (eos_token_id)
        # TODO add this in the generate method?
        outputs[outputs == 0] = 2
    print("Original generation:", original_outputs)
    output_text = processor.batch_decode(outputs, skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"Salesforce/{model_name}")
        hf_model.push_to_hub(f"Salesforce/{model_name}")
if __name__ == "__main__":
snake_case : Tuple = argparse.ArgumentParser()
snake_case : Optional[int] = [
'''instructblip-vicuna-7b''',
'''instructblip-vicuna-13b''',
'''instructblip-flan-t5-xl''',
'''instructblip-flan-t5-xxl''',
]
parser.add_argument(
'''--model_name''',
default='''instructblip-flan-t5-xl''',
choices=choices,
type=str,
help='''Path to hf config.json of model to convert''',
)
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model and processor to the hub after converting''',
)
snake_case : Any = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 711 |
from decimal import Decimal, getcontext
from math import ceil, factorial
def pi(precision: int) -> str:
    """Compute pi to `precision` decimal places with the Chudnovsky algorithm."""
    if not isinstance(precision, int):
        raise TypeError("Undefined for non-integers")
    elif precision < 1:
        raise ValueError("Undefined for non-natural numbers")

    getcontext().prec = precision
    num_iterations = ceil(precision / 14)
    constant_term = 426880 * Decimal(10005).sqrt()
    exponential_term = 1
    linear_term = 13591409
    partial_sum = Decimal(linear_term)
    for k in range(1, num_iterations):
        multinomial_term = factorial(6 * k) // (factorial(3 * k) * factorial(k) ** 3)
        linear_term += 545140134
        exponential_term *= -262537412640768000
        partial_sum += Decimal(multinomial_term * linear_term) / exponential_term
    return str(constant_term / partial_sum)[:-1]
if __name__ == "__main__":
snake_case : Tuple = 50
print(f"""The first {n} digits of pi is: {pi(n)}""")
| 657 | 0 |
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def SCREAMING_SNAKE_CASE__ ( SCREAMING_SNAKE_CASE__ : str , SCREAMING_SNAKE_CASE__ : Union[str, Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : Optional[int]=5 ):
"""simple docstring"""
assert masked_input.count("""<mask>""" ) == 1
snake_case_ : Optional[int] = torch.tensor(tokenizer.encode(__A , add_special_tokens=__A ) ).unsqueeze(0 ) # Batch size 1
snake_case_ : Dict = model(__A )[0] # The last hidden-state is the first element of the output tuple
snake_case_ : Union[str, Any] = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
snake_case_ : Optional[int] = logits[0, masked_index, :]
snake_case_ : List[Any] = logits.softmax(dim=0 )
snake_case_ : Any = prob.topk(k=__A , dim=0 )
snake_case_ : List[str] = """ """.join(
[tokenizer.convert_ids_to_tokens(indices[i].item() ) for i in range(len(__A ) )] )
snake_case_ : Tuple = tokenizer.mask_token
snake_case_ : str = []
for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(""" """ ) ):
snake_case_ : Optional[Any] = predicted_token_bpe.replace("""\u2581""" , """ """ )
if " {0}".format(__A ) in masked_input:
topk_filled_outputs.append(
(
masked_input.replace(""" {0}""".format(__A ) , __A ),
values[index].item(),
predicted_token,
) )
else:
topk_filled_outputs.append(
(
masked_input.replace(__A , __A ),
values[index].item(),
predicted_token,
) )
return topk_filled_outputs
a_ = CamembertTokenizer.from_pretrained('''camembert-base''')
a_ = CamembertForMaskedLM.from_pretrained('''camembert-base''')
model.eval()
a_ = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 480 |
"""simple docstring"""
import os
import random
import sys
from . import cryptomath_module as cryptoMath # noqa: N812
from . import rabin_miller as rabinMiller # noqa: N812
def __snake_case ( ) -> None:
print("""Making key files...""" )
make_key_files("""rsa""" ,1024 )
print("""Key files generation successful.""" )
def __snake_case ( __A ) -> tuple[tuple[int, int], tuple[int, int]]:
print("""Generating prime p...""" )
lowercase : int = rabinMiller.generate_large_prime(__A )
print("""Generating prime q...""" )
lowercase : Optional[int] = rabinMiller.generate_large_prime(__A )
lowercase : Optional[int] = p * q
print("""Generating e that is relatively prime to (p - 1) * (q - 1)...""" )
while True:
lowercase : List[str] = random.randrange(2 ** (key_size - 1) ,2 ** (key_size) )
if cryptoMath.gcd(__A ,(p - 1) * (q - 1) ) == 1:
break
print("""Calculating d that is mod inverse of e...""" )
lowercase : List[Any] = cryptoMath.find_mod_inverse(__A ,(p - 1) * (q - 1) )
lowercase : Any = (n, e)
lowercase : Optional[Any] = (n, d)
return (public_key, private_key)
def __snake_case ( __A ,__A ) -> None:
if os.path.exists(F'''{name}_pubkey.txt''' ) or os.path.exists(F'''{name}_privkey.txt''' ):
print("""\nWARNING:""" )
print(
F'''"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'''
"""Use a different name or delete these files and re-run this program.""" )
sys.exit()
lowercase , lowercase : Optional[int] = generate_key(__A )
print(F'''\nWriting public key to file {name}_pubkey.txt...''' )
with open(F'''{name}_pubkey.txt''' ,"""w""" ) as out_file:
out_file.write(F'''{key_size},{public_key[0]},{public_key[1]}''' )
print(F'''Writing private key to file {name}_privkey.txt...''' )
with open(F'''{name}_privkey.txt''' ,"""w""" ) as out_file:
out_file.write(F'''{key_size},{private_key[0]},{private_key[1]}''' )
if __name__ == "__main__":
main()
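# Editor's illustration (not part of the original file): once (n, e) and (n, d)
# exist, textbook RSA is just modular exponentiation. Real deployments must add
# padding such as OAEP; this sketch deliberately omits it.
public_key, private_key = generate_key(64)  # tiny key, demo only
n, e = public_key
_, d = private_key
message = 42
ciphertext = pow(message, e, n)
assert pow(ciphertext, d, n) == message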
| 607 | 0 |
from __future__ import annotations
from math import pi
def ind_reactance(inductance: float, frequency: float, reactance: float) -> dict[str, float]:
    """Given any two of inductance, frequency and inductive reactance, solve for the third (pass 0 for the unknown)."""
    if (inductance, frequency, reactance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if inductance < 0:
        raise ValueError("Inductance cannot be negative")
    if frequency < 0:
        raise ValueError("Frequency cannot be negative")
    if reactance < 0:
        raise ValueError("Inductive reactance cannot be negative")
    if inductance == 0:
        return {"inductance": reactance / (2 * pi * frequency)}
    elif frequency == 0:
        return {"frequency": reactance / (2 * pi * inductance)}
    elif reactance == 0:
        return {"reactance": 2 * pi * frequency * inductance}
    else:
        raise ValueError("Exactly one argument must be 0")
if __name__ == "__main__":
import doctest
doctest.testmod()
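# Editor's illustration (not part of the original file): X_L = 2*pi*f*L, so a
# 10 mH inductor at 50 Hz has a reactance of 2*pi*50*0.01 ≈ 3.14 ohms.
print(ind_reactance(inductance=0.01, frequency=50, reactance=0))  # {'reactance': 3.141592653589793}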
| 75 |
import os
from datetime import datetime as dt
from github import Github
LABELS_TO_EXEMPT = [
'good first issue',
'good second issue',
'good difficult issue',
'enhancement',
'new pipeline/model',
'new scheduler',
'wip',
]
def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/diffusers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted(issue.get_comments(), key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Closes the issue after 7 days of inactivity since the Stalebot notification.
            issue.edit(state="closed")
        elif (
            "stale" in issue.get_labels()
            and last_comment is not None
            and last_comment.user.login != "github-actions[bot]"
        ):
            # Opens the issue if someone other than Stalebot commented.
            issue.edit(state="open")
            issue.remove_from_labels("stale")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # Post a Stalebot notification after 23 days of inactivity.
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/diffusers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )
            issue.add_to_labels("stale")
if __name__ == "__main__":
main()
| 75 | 1 |
"""simple docstring"""
def lowercase (snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , snake_case__ : float , ) -> float:
'''simple docstring'''
lowerCAmelCase = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError("""All input parameters must be positive""" )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError("""Relative densities cannot be greater than one""" )
else:
lowerCAmelCase = 1 - (matter_density + radiation_density + dark_energy)
lowerCAmelCase = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowerCAmelCase = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
    matter_density = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1e-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
)
| 169 |
"""simple docstring"""
from __future__ import annotations
from typing import Any
class SCREAMING_SNAKE_CASE__ :
def __init__( self : Any , lowerCAmelCase : int ):
lowerCAmelCase = num_of_nodes
lowerCAmelCase = []
lowerCAmelCase = {}
def __lowercase ( self : Optional[int] , lowerCAmelCase : int , lowerCAmelCase : int , lowerCAmelCase : int ):
self.m_edges.append([u_node, v_node, weight] )
def __lowercase ( self : List[str] , lowerCAmelCase : int ):
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def __lowercase ( self : str , lowerCAmelCase : int ):
if self.m_component[u_node] != u_node:
for k in self.m_component:
lowerCAmelCase = self.find_component(lowerCAmelCase )
def __lowercase ( self : List[str] , lowerCAmelCase : list[int] , lowerCAmelCase : int , lowerCAmelCase : int ):
if component_size[u_node] <= component_size[v_node]:
lowerCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(lowerCAmelCase )
elif component_size[u_node] >= component_size[v_node]:
lowerCAmelCase = self.find_component(lowerCAmelCase )
component_size[u_node] += component_size[v_node]
self.set_component(lowerCAmelCase )
def __lowercase ( self : List[Any] ):
lowerCAmelCase = []
lowerCAmelCase = 0
lowerCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
lowerCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edge
lowerCAmelCase = self.m_component[u]
lowerCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
lowerCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = edge
lowerCAmelCase = self.m_component[u]
lowerCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
print(f'''Added edge [{u} - {v}]\nAdded weight: {w}\n''' )
num_of_components -= 1
lowerCAmelCase = [-1] * self.m_num_of_nodes
print(f'''The total weight of the minimal spanning tree is: {mst_weight}''' )
def lowercase () -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
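# Editor's illustration (not part of the original file): a 4-node graph whose
# minimum spanning tree has total weight 19.
g = Graph(4)
g.add_edge(0, 1, 10)
g.add_edge(0, 2, 6)
g.add_edge(0, 3, 5)
g.add_edge(1, 3, 15)
g.add_edge(2, 3, 4)
g.boruvka()  # picks edges (2,3), (0,3), (0,1): total weight 19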
| 169 | 1 |
from __future__ import annotations
from numpy import array, cos, cross, float64, radians, sin
from numpy.typing import NDArray


def polar_force(magnitude: float, angle: float, radian_mode: bool = False) -> list[float]:
    """Resolve a force of given magnitude and direction into its (x, y) components."""
    if radian_mode:
        return [magnitude * cos(angle), magnitude * sin(angle)]
    return [magnitude * cos(radians(angle)), magnitude * sin(radians(angle))]


def in_static_equilibrium(forces: NDArray[float64], location: NDArray[float64], eps: float = 10**-1) -> bool:
    """Check if a system of forces is in static equilibrium (net moment ~ 0)."""
    # summation of moments is zero
    moments: NDArray[float64] = cross(location, forces)
    sum_moments: float = sum(moments)
    return abs(sum_moments) < eps
if __name__ == "__main__":
    # Test to check if it works
    forces = array(
        [
            polar_force(718.4, 180 - 30),
            polar_force(879.54, 45),
            polar_force(100, -90),
        ]
    )

    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem 1 in image_data/2D_problems.jpg
    forces = array(
        [
            polar_force(30 * 9.81, 15),
            polar_force(215, 180 - 45),
            polar_force(264, 90 - 30),
        ]
    )
    location = array([[0, 0], [0, 0], [0, 0]])

    assert in_static_equilibrium(forces, location)

    # Problem in image_data/2D_problems_1.jpg
    forces = array([[0, -2000], [0, -1200], [0, 15600], [0, -12400]])
    location = array([[0, 0], [6, 0], [10, 0], [12, 0]])

    assert in_static_equilibrium(forces, location)

    import doctest

    doctest.testmod()
| 576 | import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal representation."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")


if __name__ == "__main__":
    main()
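# Editor's illustration (not part of the original file): the result agrees with
# Python's built-in oct().
assert decimal_to_octal(216) == oct(216)  # both '0o330'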
| 576 | 1 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFXLMRobertaModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFXLMRobertaModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFXLMRobertaModel.from_pretrained("jplu/tf-xlm-roberta-base")

        features = {
            "input_ids": tf.convert_to_tensor([[0, 2646, 10269, 83, 99942, 2]], dtype=tf.int32),  # "My dog is cute"
            "attention_mask": tf.convert_to_tensor([[1, 1, 1, 1, 1, 1]], dtype=tf.int32),
        }

        output = model(features)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 6, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [
                [
                    [0.0681762, 0.10894451, 0.06772504],
                    [-0.06423668, 0.02366615, 0.04329344],
                    [-0.06057295, 0.09974135, -0.00070584],
                ]
            ],
            dtype=tf.float32,
        )

        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4)) | 557 |
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Find the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Return the text index of the rightmost mismatch for this alignment, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions


text = "ABAABA"
pattern = "AB"
bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)  # [0, 3] for this text/pattern pair | 557 | 1 |
"""simple docstring"""
from typing import Any
def mode(input_list: list) -> list[Any]:
    """Return every value that occurs most often in `input_list`, sorted."""
    if not input_list:
        return []
    result = [input_list.count(value) for value in input_list]
    y = max(result)  # Gets the maximum count in the input list.
    # Gets values of modes
    return sorted({input_list[i] for i, value in enumerate(result) if value == y})
if __name__ == "__main__":
import doctest
doctest.testmod()
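# Editor's illustration (not part of the original file): a bimodal list returns
# both modes, and the empty list returns an empty result.
assert mode([1, 2, 2, 3, 3]) == [2, 3]
assert mode([]) == []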
| 67 | """simple docstring"""
import argparse

import torch
from omegaconf import OmegaConf

from diffusers import DDIMScheduler, LDMPipeline, UNetLDMModel, VQModel


def convert_ldm_original(checkpoint_path, config_path, output_path):
    config = OmegaConf.load(config_path)
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    keys = list(state_dict.keys())

    # extract state_dict for VQVAE
    first_stage_dict = {}
    first_stage_key = "first_stage_model."
    for key in keys:
        if key.startswith(first_stage_key):
            first_stage_dict[key.replace(first_stage_key, "")] = state_dict[key]

    # extract state_dict for UNetLDM
    unet_state_dict = {}
    unet_key = "model.diffusion_model."
    for key in keys:
        if key.startswith(unet_key):
            unet_state_dict[key.replace(unet_key, "")] = state_dict[key]

    vqvae_init_args = config.model.params.first_stage_config.params
    unet_init_args = config.model.params.unet_config.params

    vqvae = VQModel(**vqvae_init_args).eval()
    vqvae.load_state_dict(first_stage_dict)

    unet = UNetLDMModel(**unet_init_args).eval()
    unet.load_state_dict(unet_state_dict)

    noise_scheduler = DDIMScheduler(
        timesteps=config.model.params.timesteps,
        beta_schedule="scaled_linear",
        beta_start=config.model.params.linear_start,
        beta_end=config.model.params.linear_end,
        clip_sample=False,
    )

    pipeline = LDMPipeline(vqvae, unet, noise_scheduler)
    pipeline.save_pretrained(output_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--checkpoint_path", type=str, required=True)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--output_path", type=str, required=True)
    args = parser.parse_args()

    convert_ldm_original(args.checkpoint_path, args.config_path, args.output_path)
| 67 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP = {}


class LlamaConfig(PretrainedConfig):
    model_type = "llama"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=32000,
        hidden_size=4096,
        intermediate_size=11008,
        num_hidden_layers=32,
        num_attention_heads=32,
        num_key_value_heads=None,
        hidden_act="silu",
        max_position_embeddings=2048,
        initializer_range=0.02,
        rms_norm_eps=1e-6,
        use_cache=True,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        pretraining_tp=1,
        tie_word_embeddings=False,
        rope_scaling=None,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads

        # for backward compatibility
        if num_key_value_heads is None:
            num_key_value_heads = num_attention_heads

        self.num_key_value_heads = num_key_value_heads
        self.hidden_act = hidden_act
        self.initializer_range = initializer_range
        self.rms_norm_eps = rms_norm_eps
        self.pretraining_tp = pretraining_tp
        self.use_cache = use_cache
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        super().__init__(
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            tie_word_embeddings=tie_word_embeddings,
            **kwargs,
        )

    def _rope_scaling_validation(self):
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling, dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f"got {self.rope_scaling}"
            )
        rope_scaling_type = self.rope_scaling.get("type", None)
        rope_scaling_factor = self.rope_scaling.get("factor", None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f"`rope_scaling`'s name field must be one of ['linear', 'dynamic'], got {rope_scaling_type}"
            )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor, float) or rope_scaling_factor <= 1.0:
            raise ValueError(f"`rope_scaling`'s factor field must be an float > 1, got {rope_scaling_factor}")
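# Editor's illustration (not part of the original file): the validation above
# rejects malformed `rope_scaling` dictionaries at construction time.
_ = LlamaConfig(rope_scaling={"type": "linear", "factor": 2.0})  # passes validation
try:
    LlamaConfig(rope_scaling={"type": "cubic", "factor": 2.0})
except ValueError as err:
    print(err)  # name field must be one of ['linear', 'dynamic']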
| 105 |
'''simple docstring'''
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
logger = logging.getLogger(__name__)


class NER(TokenClassificationTask):
    def __init__(self, label_idx=-1):
        # in NER datasets, the last column is usually reserved for the NER label
        self.label_idx = label_idx

    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []
        with open(file_path, encoding="utf-8") as f:
            words = []
            labels = []
            for line in f:
                if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                    if words:
                        examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                        guid_index += 1
                        words = []
                        labels = []
                else:
                    splits = line.split(" ")
                    words.append(splits[0])
                    if len(splits) > 1:
                        labels.append(splits[self.label_idx].replace("\n", ""))
                    else:
                        # Examples could have no label for mode = "test"
                        labels.append("O")
            if words:
                examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for line in test_input_reader:
            if line.startswith("-DOCSTART-") or line == "" or line == "\n":
                writer.write(line)
                if not preds_list[example_id]:
                    example_id += 1
            elif preds_list[example_id]:
                output_line = line.split()[0] + " " + preds_list[example_id].pop(0) + "\n"
                writer.write(output_line)
            else:
                logger.warning("Maximum sequence length exceeded: No prediction for '%s'.", line.split()[0])

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]


class Chunk(NER):
    def __init__(self):
        # in the CoNLL chunking format, the chunk column is second-to-last
        super().__init__(label_idx=-2)

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                labels = f.read().splitlines()
            if "O" not in labels:
                labels = ["O"] + labels
            return labels
        else:
            return [
                "O",
                "B-ADVP",
                "B-INTJ",
                "B-LST",
                "B-PRT",
                "B-NP",
                "B-SBAR",
                "B-VP",
                "B-ADJP",
                "B-CONJP",
                "B-PP",
                "I-ADVP",
                "I-INTJ",
                "I-LST",
                "I-PRT",
                "I-NP",
                "I-SBAR",
                "I-VP",
                "I-ADJP",
                "I-CONJP",
                "I-PP",
            ]


class POS(TokenClassificationTask):
    def read_examples_from_file(self, data_dir, mode: Union[Split, str]) -> List[InputExample]:
        if isinstance(mode, Split):
            mode = mode.value
        file_path = os.path.join(data_dir, f"{mode}.txt")
        guid_index = 1
        examples = []

        with open(file_path, encoding="utf-8") as f:
            for sentence in parse_incr(f):
                words = []
                labels = []
                for token in sentence:
                    words.append(token["form"])
                    labels.append(token["upos"])
                assert len(words) == len(labels)
                if words:
                    examples.append(InputExample(guid=f"{mode}-{guid_index}", words=words, labels=labels))
                    guid_index += 1
        return examples

    def write_predictions_to_file(self, writer: TextIO, test_input_reader: TextIO, preds_list: List):
        example_id = 0
        for sentence in parse_incr(test_input_reader):
            s_p = preds_list[example_id]
            out = ""
            for token in sentence:
                out += f"{token['form']} ({token['upos']}|{s_p.pop(0)}) "
            out += "\n"
            writer.write(out)
            example_id += 1

    def get_labels(self, path: str) -> List[str]:
        if path:
            with open(path, "r") as f:
                return f.read().splitlines()
        else:
            return [
                "ADJ",
                "ADP",
                "ADV",
                "AUX",
                "CCONJ",
                "DET",
                "INTJ",
                "NOUN",
                "NUM",
                "PART",
                "PRON",
                "PROPN",
                "PUNCT",
                "SCONJ",
                "SYM",
                "VERB",
                "X",
            ]
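# Editor's illustration (not part of the original file): parse a tiny
# CoNLL-style file with one "word label" pair per line and a blank line
# between sentences.
import tempfile

with tempfile.TemporaryDirectory() as data_dir:
    with open(os.path.join(data_dir, "train.txt"), "w", encoding="utf-8") as f:
        f.write("Alice B-PER\nvisited O\nParis B-LOC\n\n")
    examples = NER().read_examples_from_file(data_dir, "train")
    print(examples[0].words, examples[0].labels)  # ['Alice', 'visited', 'Paris'] ['B-PER', 'O', 'B-LOC']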
| 588 | 0 |
'''simple docstring'''
from __future__ import annotations
import requests
lowercase__ = set(
"approved_at_utc approved_by author_flair_background_color\nauthor_flair_css_class author_flair_richtext author_flair_template_id author_fullname\nauthor_premium can_mod_post category clicked content_categories created_utc downs\nedited gilded gildings hidden hide_score is_created_from_ads_ui is_meta\nis_original_content is_reddit_media_domain is_video link_flair_css_class\nlink_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title\nname permalink pwls quarantine saved score secure_media secure_media_embed selftext\nsubreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type\ntotal_awards_received ups upvote_ratio url user_reports".split()
)
def get_subreddit_data(
    subreddit: str, limit: int = 1, age: str = "new", wanted_data: list | None = None
) -> dict:
    """Fetch `limit` posts from a subreddit, optionally projecting onto `wanted_data` fields."""
    wanted_data = wanted_data or []
    if invalid_search_terms := ", ".join(sorted(set(wanted_data) - valid_terms)):
        msg = f"Invalid search term: {invalid_search_terms}"
        raise ValueError(msg)
    response = requests.get(
        f"https://reddit.com/r/{subreddit}/{age}.json?limit={limit}",
        headers={"User-agent": "A random string"},
    )
    if response.status_code == 429:
        raise requests.HTTPError

    data = response.json()
    if not wanted_data:
        return {id_: data["data"]["children"][id_] for id_ in range(limit)}

    data_dict = {}
    for id_ in range(limit):
        data_dict[id_] = {
            item: data["data"]["children"][id_]["data"][item] for item in wanted_data
        }
    return data_dict
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited.Try after some time
print(get_subreddit_data("learnpython", wanted_data=["title", "url", "selftext"]))
| 702 |
'''simple docstring'''
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
lowercase__ = TypeVar("T")
class LRUCache(Generic[T]):
    dq_store: deque[T]  # Cache store of keys
    key_reference: set[T]  # References of the keys in cache
    _MAX_CAPACITY: int = 10  # Maximum capacity of cache

    def __init__(self, n: int) -> None:
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError("n should be an integer greater than 0.")
        else:
            LRUCache._MAX_CAPACITY = n

    def refer(self, x: T) -> None:
        if x not in self.key_reference:
            if len(self.dq_store) == LRUCache._MAX_CAPACITY:
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element)
        else:
            self.dq_store.remove(x)

        self.dq_store.appendleft(x)
        self.key_reference.add(x)

    def display(self) -> None:
        for k in self.dq_store:
            print(k)

    def __repr__(self) -> str:
        return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store)}"
if __name__ == "__main__":
import doctest
doctest.testmod()
    lru_cache = LRUCache(4)
lru_cache.refer("A")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("A")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
| 695 | 0 |
'''simple docstring'''
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
__lowerCamelCase : Dict = 16
__lowerCamelCase : int = 32
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 16 ):
"""simple docstring"""
_UpperCamelCase =AutoTokenizer.from_pretrained('''bert-base-cased''' )
_UpperCamelCase =load_dataset('''glue''' , '''mrpc''' )
def tokenize_function(__SCREAMING_SNAKE_CASE ):
# max_length=None => use the model max length (it's actually the default)
_UpperCamelCase =tokenizer(examples['''sentence1'''] , examples['''sentence2'''] , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
_UpperCamelCase =datasets.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , remove_columns=['''idx''', '''sentence1''', '''sentence2'''] , )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_UpperCamelCase =tokenized_datasets.rename_column('''label''' , '''labels''' )
def collate_fn(__SCREAMING_SNAKE_CASE ):
# On TPU it's best to pad everything to the same length or training will be very slow.
_UpperCamelCase =128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
_UpperCamelCase =16
elif accelerator.mixed_precision != "no":
_UpperCamelCase =8
else:
_UpperCamelCase =None
return tokenizer.pad(
__SCREAMING_SNAKE_CASE , padding='''longest''' , max_length=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=__SCREAMING_SNAKE_CASE , return_tensors='''pt''' , )
# Instantiate dataloaders.
_UpperCamelCase =DataLoader(
tokenized_datasets['''train'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
_UpperCamelCase =DataLoader(
tokenized_datasets['''validation'''] , shuffle=__SCREAMING_SNAKE_CASE , collate_fn=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE )
return train_dataloader, eval_dataloader
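# Illustration (added, not part of the original example): why `pad_to_multiple_of`
# matters under mixed precision. Padding lengths to a multiple of 8 keeps tensor
# shapes friendly to fp16 tensor cores. The helper name and model are assumptions.
def _demo_pad_to_multiple_of():
    from transformers import AutoTokenizer

    tok = AutoTokenizer.from_pretrained("bert-base-cased")
    batch = tok.pad(
        tok(["short", "a much longer sentence"]),
        padding="longest",
        pad_to_multiple_of=8,  # every padded length becomes a multiple of 8
        return_tensors="pt",
    )
    assert batch["input_ids"].shape[1] % 8 == 0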
# For testing only
if os.environ.get('TESTING_MOCKED_DATALOADERS', None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
__lowerCamelCase : Optional[int] = mocked_dataloaders # noqa: F811
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if os.environ.get('''TESTING_MOCKED_DATALOADERS''' , __SCREAMING_SNAKE_CASE ) == "1":
_UpperCamelCase =2
# New Code #
_UpperCamelCase =int(args.gradient_accumulation_steps )
# Initialize accelerator
_UpperCamelCase =Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , gradient_accumulation_steps=__SCREAMING_SNAKE_CASE )
if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
raise NotImplementedError(
'''Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`''' )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_UpperCamelCase =config['''lr''']
_UpperCamelCase =int(config['''num_epochs'''] )
_UpperCamelCase =int(config['''seed'''] )
_UpperCamelCase =int(config['''batch_size'''] )
_UpperCamelCase =evaluate.load('''glue''' , '''mrpc''' )
set_seed(__SCREAMING_SNAKE_CASE )
_UpperCamelCase , _UpperCamelCase =get_dataloaders(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_UpperCamelCase =AutoModelForSequenceClassification.from_pretrained('''bert-base-cased''' , return_dict=__SCREAMING_SNAKE_CASE )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
_UpperCamelCase =model.to(accelerator.device )
# Instantiate optimizer
_UpperCamelCase =AdamW(params=model.parameters() , lr=__SCREAMING_SNAKE_CASE )
# Instantiate scheduler
_UpperCamelCase =get_linear_schedule_with_warmup(
optimizer=__SCREAMING_SNAKE_CASE , num_warmup_steps=100 , num_training_steps=(len(__SCREAMING_SNAKE_CASE ) * num_epochs) , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase =accelerator.prepare(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Now we train the model
for epoch in range(__SCREAMING_SNAKE_CASE ):
model.train()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
# New code #
# We use the new `accumulate` context manager to perform gradient accumulation
# We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
with accelerator.accumulate(__SCREAMING_SNAKE_CASE ):
_UpperCamelCase =model(**__SCREAMING_SNAKE_CASE )
_UpperCamelCase =output.loss
accelerator.backward(__SCREAMING_SNAKE_CASE )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(__SCREAMING_SNAKE_CASE ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_UpperCamelCase =model(**__SCREAMING_SNAKE_CASE )
_UpperCamelCase =outputs.logits.argmax(dim=-1 )
_UpperCamelCase , _UpperCamelCase =accelerator.gather_for_metrics((predictions, batch['''labels''']) )
metric.add_batch(
predictions=__SCREAMING_SNAKE_CASE , references=__SCREAMING_SNAKE_CASE , )
_UpperCamelCase =metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(f'''epoch {epoch}:''' , __SCREAMING_SNAKE_CASE )
def _a ():
"""simple docstring"""
_UpperCamelCase =argparse.ArgumentParser(description='''Simple example of training script.''' )
parser.add_argument(
'''--mixed_precision''' , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , choices=['''no''', '''fp16''', '''bf16''', '''fp8'''] , help='''Whether to use mixed precision. Choose'''
'''between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'''
'''and an Nvidia Ampere GPU.''' , )
# New Code #
parser.add_argument(
'''--gradient_accumulation_steps''' , type=__SCREAMING_SNAKE_CASE , default=1 , help='''The number of minibatches to run before gradients are accumulated.''' , )
parser.add_argument('''--cpu''' , action='''store_true''' , help='''If passed, will train on the CPU.''' )
_UpperCamelCase =parser.parse_args()
_UpperCamelCase ={'''lr''': 2E-5, '''num_epochs''': 3, '''seed''': 42, '''batch_size''': 16}
training_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
main()
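# Pattern recap (added for illustration; not part of the original script): the
# whole gradient-accumulation change is the context manager around the step.
#
#   accelerator = Accelerator(gradient_accumulation_steps=4)
#   for batch in train_dataloader:
#       with accelerator.accumulate(model):
#           loss = model(**batch).loss
#           accelerator.backward(loss)   # gradient sync and the optimizer step
#           optimizer.step()             # only take effect every 4th batch;
#           optimizer.zero_grad()        # otherwise gradients accumulate locally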
| 404 |
'''simple docstring'''
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
class UpperCAmelCase ( lowercase_):
"""simple docstring"""
lowerCAmelCase_ = (UnCLIPScheduler,)
def UpperCamelCase__ ( self : Union[str, Any] , **UpperCamelCase__ : Dict ) -> Optional[Any]:
_UpperCamelCase ={
'''num_train_timesteps''': 1000,
'''variance_type''': '''fixed_small_log''',
'''clip_sample''': True,
'''clip_sample_range''': 1.0,
'''prediction_type''': '''epsilon''',
}
config.update(**UpperCamelCase__ )
return config
def UpperCamelCase__ ( self : str ) -> Tuple:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> int:
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> List[str]:
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[Any] ) -> Dict:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def UpperCamelCase__ ( self : Optional[Any] ) -> Tuple:
for time_step in [0, 500, 999]:
for prev_timestep in [None, 5, 100, 250, 500, 750]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=UpperCamelCase__ , prev_timestep=UpperCamelCase__ )
def UpperCamelCase__ ( self : List[str] ) -> Dict:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''fixed_small_log''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.0000E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0549625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.9994987 ) ) < 1E-5
def UpperCamelCase__ ( self : Any ) -> Tuple:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config(variance_type='''learned_range''' )
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =0.5
assert scheduler._get_variance(1 , predicted_variance=UpperCamelCase__ ) - -10.1712790 < 1E-5
assert scheduler._get_variance(487 , predicted_variance=UpperCamelCase__ ) - -5.7998052 < 1E-5
assert scheduler._get_variance(999 , predicted_variance=UpperCamelCase__ ) - -0.0010011 < 1E-5
def UpperCamelCase__ ( self : str ) -> Optional[int]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
_UpperCamelCase =model(UpperCamelCase__ , UpperCamelCase__ )
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(UpperCamelCase__ ) )
_UpperCamelCase =torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 252.2682495 ) < 1E-2
assert abs(result_mean.item() - 0.3284743 ) < 1E-3
def UpperCamelCase__ ( self : List[str] ) -> Union[str, Any]:
_UpperCamelCase =self.scheduler_classes[0]
_UpperCamelCase =self.get_scheduler_config()
_UpperCamelCase =scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(25 )
_UpperCamelCase =scheduler.timesteps
_UpperCamelCase =self.dummy_model()
_UpperCamelCase =self.dummy_sample_deter
_UpperCamelCase =torch.manual_seed(0 )
for i, t in enumerate(UpperCamelCase__ ):
# 1. predict noise residual
_UpperCamelCase =model(UpperCamelCase__ , UpperCamelCase__ )
if i + 1 == timesteps.shape[0]:
_UpperCamelCase =None
else:
_UpperCamelCase =timesteps[i + 1]
# 2. predict previous mean of sample x_t-1
_UpperCamelCase =scheduler.step(
UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , prev_timestep=UpperCamelCase__ , generator=UpperCamelCase__ ).prev_sample
_UpperCamelCase =pred_prev_sample
_UpperCamelCase =torch.sum(torch.abs(UpperCamelCase__ ) )
_UpperCamelCase =torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_sum.item() - 258.2044983 ) < 1E-2
assert abs(result_mean.item() - 0.3362038 ) < 1E-3
def UpperCamelCase__ ( self : str ) -> Any:
pass
def UpperCamelCase__ ( self : List[str] ) -> str:
pass
| 404 | 1 |
'''simple docstring'''
from __future__ import annotations
def get_valid_pos(position: tuple[int, int], n: int) -> list[tuple[int, int]]:
    """Return every knight move from `position` that stays on an n x n board."""
    y, x = position
    positions = [
        (y + 1, x + 2),
        (y - 1, x + 2),
        (y + 1, x - 2),
        (y - 1, x - 2),
        (y + 2, x + 1),
        (y + 2, x - 1),
        (y - 2, x + 1),
        (y - 2, x - 1),
    ]
    permissible_positions = []
    for inner_position in positions:
        y_test, x_test = inner_position
        if 0 <= y_test < n and 0 <= x_test < n:
            permissible_positions.append(inner_position)
    return permissible_positions


def is_complete(board: list[list[int]]) -> bool:
    """True once every cell has been visited (no zeroes left)."""
    return not any(elem == 0 for row in board for elem in row)


def open_knight_tour_helper(
    board: list[list[int]], pos: tuple[int, int], curr: int
) -> bool:
    """Backtracking helper: try each valid move, undo it if it leads nowhere."""
    if is_complete(board):
        return True
    for position in get_valid_pos(pos, len(board)):
        y, x = position
        if board[y][x] == 0:
            board[y][x] = curr + 1
            if open_knight_tour_helper(board, position, curr + 1):
                return True
            board[y][x] = 0
    return False


def open_knight_tour(n: int) -> list[list[int]]:
    """Find an open knight's tour on an n x n board, trying every starting cell."""
    board = [[0 for i in range(n)] for j in range(n)]
    for i in range(n):
        for j in range(n):
            board[i][j] = 1
            if open_knight_tour_helper(board, (i, j), 1):
                return board
            board[i][j] = 0
    msg = f"Open Knight Tour cannot be performed on a board of size {n}"
    raise ValueError(msg)
if __name__ == "__main__":
import doctest
doctest.testmod()
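    # Example run (added for illustration): a 5x5 board admits an open knight's
    # tour, so each cell ends up holding the step at which the knight visits it.
    # (Plain backtracking, so this can take a few seconds.)
    for row in open_knight_tour(5):
        print(row)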
| 705 |
'''simple docstring'''
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
UpperCAmelCase : str = logging.getLogger(__name__)
UpperCAmelCase : Dict = 5_0 # max width of layer names
UpperCAmelCase : Union[str, Any] = 7_0 # max width of quantizer names
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Dict = parser.add_argument_group('''quant_trainer arguments''' )
group.add_argument('''--wprec''' , type=lowerCAmelCase_ , default=8 , help='''weight precision''' )
group.add_argument('''--aprec''' , type=lowerCAmelCase_ , default=8 , help='''activation precision''' )
group.add_argument('''--quant-per-tensor''' , action='''store_true''' , help='''per tensor weight scaling''' )
group.add_argument('''--quant-disable''' , action='''store_true''' , help='''disable all quantizers''' )
group.add_argument('''--quant-disable-embeddings''' , action='''store_true''' , help='''disable all embeddings quantizers''' )
group.add_argument('''--quant-disable-keyword''' , type=lowerCAmelCase_ , nargs='''+''' , help='''disable quantizers by keyword''' )
group.add_argument('''--quant-disable-layer-module''' , type=lowerCAmelCase_ , help='''disable quantizers by keyword under layer.''' )
group.add_argument('''--quant-enable-layer-module''' , type=lowerCAmelCase_ , help='''enable quantizers by keyword under layer''' )
group.add_argument('''--calibrator''' , default='''max''' , help='''which quantization range calibrator to use''' )
group.add_argument('''--percentile''' , default=lowerCAmelCase_ , type=lowerCAmelCase_ , help='''percentile for PercentileCalibrator''' )
group.add_argument('''--fuse-qkv''' , action='''store_true''' , help='''use the same scale factor for qkv''' )
group.add_argument('''--clip-gelu''' , metavar='''N''' , type=lowerCAmelCase_ , help='''clip gelu output maximum value to N''' )
group.add_argument(
'''--recalibrate-weights''' , action='''store_true''' , help=(
'''recalibrate weight amaxes by taking the max of the weights.'''
''' amaxes will be computed with the current quantization granularity (axis).'''
) , )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
if args.calibrator == "max":
_snake_case : Optional[int] = '''max'''
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('''Specify --percentile when using percentile calibrator''' )
_snake_case : Tuple = '''histogram'''
elif args.calibrator == "mse":
_snake_case : int = '''histogram'''
else:
raise ValueError(f'''Invalid calibrator {args.calibrator}''' )
_snake_case : Tuple = QuantDescriptor(num_bits=args.aprec , calib_method=lowerCAmelCase_ )
_snake_case : str = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(lowerCAmelCase_ )
quant_nn.QuantLinear.set_default_quant_desc_weight(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_=False ):
"""simple docstring"""
logger.info('''Configuring Model for Quantization''' )
logger.info(f'''using quantization package {pytorch_quantization.__file__}''' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(lowerCAmelCase_ , ['''embeddings'''] , which='''weight''' , _disabled=lowerCAmelCase_ )
if args.quant_disable:
set_quantizer_by_name(lowerCAmelCase_ , [''''''] , _disabled=lowerCAmelCase_ )
if args.quant_disable_keyword:
set_quantizer_by_name(lowerCAmelCase_ , args.quant_disable_keyword , _disabled=lowerCAmelCase_ )
if args.quant_disable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_disable_layer_module] , _disabled=lowerCAmelCase_ )
if args.quant_enable_layer_module:
set_quantizer_by_name(lowerCAmelCase_ , [R'''layer.\d+.''' + args.quant_enable_layer_module] , _disabled=lowerCAmelCase_ )
if args.recalibrate_weights:
recalibrate_weights(lowerCAmelCase_ )
if args.fuse_qkv:
fuse_qkv(lowerCAmelCase_ , lowerCAmelCase_ )
if args.clip_gelu:
clip_gelu(lowerCAmelCase_ , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Enabling Calibration''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(f'''{name:80}: {module}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
logger.info('''Loading calibrated amax''' )
for name, module in model.named_modules():
if name.endswith('''_quantizer''' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('''percentile''' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
def fusea(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
for mod in [qq, qk, qv]:
if not hasattr(lowerCAmelCase_ , '''_amax''' ):
print(''' WARNING: NO AMAX BUFFER''' )
return
_snake_case : Tuple = qq._amax.detach().item()
_snake_case : Tuple = qk._amax.detach().item()
_snake_case : List[Any] = qv._amax.detach().item()
_snake_case : List[str] = max(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
qq._amax.fill_(lowerCAmelCase_ )
qk._amax.fill_(lowerCAmelCase_ )
qv._amax.fill_(lowerCAmelCase_ )
logger.info(f''' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}''' )
for name, mod in model.named_modules():
if name.endswith('''.attention.self''' ):
logger.info(f'''FUSE_QKV: {name:{name_width}}''' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith('''.output.dense''' ) and not name.endswith('''attention.output.dense''' ):
_snake_case : List[Any] = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=lowerCAmelCase_ )
_snake_case : List[str] = mod._input_quantizer._amax.data.detach().item()
logger.info(f'''CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ) and mod._weight_quantizer.axis is not None:
_snake_case : Dict = mod.weight.shape[0]
_snake_case : Optional[int] = mod._weight_quantizer._amax.detach()
_snake_case : Optional[int] = torch.ones(lowerCAmelCase_ , dtype=amax.dtype , device=amax.device ) * amax
print(f'''expanding {name} {amax} -> {mod._weight_quantizer._amax}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
if not hasattr(mod._weight_quantizer , '''_amax''' ):
print(f'''RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER''' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_snake_case : int = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_snake_case : Dict = set(range(len(mod.weight.size() ) ) ) - axis_set
_snake_case : Optional[int] = pytorch_quantization.utils.reduce_amax(mod.weight , axis=lowerCAmelCase_ , keepdims=lowerCAmelCase_ ).detach()
logger.info(f'''RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}''' )
_snake_case : Tuple = amax
def _a ( lowerCAmelCase_ , lowerCAmelCase_=25 , lowerCAmelCase_=180 , lowerCAmelCase_=None ):
"""simple docstring"""
if ignore is None:
_snake_case : Dict = []
elif not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Optional[int] = [ignore]
_snake_case : str = 0
for name, mod in model.named_modules():
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
_snake_case : Optional[int] = max(lowerCAmelCase_ , len(lowerCAmelCase_ ) )
for name, mod in model.named_modules():
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ )
_snake_case : Tuple = getattr(lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ )
if not hasattr(lowerCAmelCase_ , '''weight''' ):
continue
if type(lowerCAmelCase_ ) in ignore:
continue
if [True for s in ignore if type(lowerCAmelCase_ ) is str and s in name]:
continue
_snake_case : Optional[int] = f'''Act:{input_q.extra_repr()}'''
_snake_case : Any = f'''Wgt:{weight_q.extra_repr()}'''
_snake_case : Optional[int] = f'''{name:{name_width}} {act_str} {wgt_str}'''
if len(lowerCAmelCase_ ) <= line_width:
logger.info(lowerCAmelCase_ )
else:
logger.info(f'''{name:{name_width}} {act_str}''' )
logger.info(f'''{" ":{name_width}} {wgt_str}''' )
def _a ( lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : str = 0
for name, mod in model.named_modules():
if isinstance(lowerCAmelCase_ , pytorch_quantization.nn.TensorQuantizer ):
print(f'''{name:80} {mod}''' )
count += 1
print(f'''{count} TensorQuantizers found in model''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if quantizer_mod is not None:
assert hasattr(lowerCAmelCase_ , lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
logger.warning(f'''{name} has no {quantizer}''' )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_="both" , **lowerCAmelCase_ ):
"""simple docstring"""
_snake_case : Optional[Any] = f'''Warning: changing {which} quantizers of {name:{qname_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
if which in ["input", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_input_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
if which in ["weight", "both"]:
set_quantizer(lowerCAmelCase_ , lowerCAmelCase_ , '''_weight_quantizer''' , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
def _a ( lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ ):
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(lowerCAmelCase_ , '''_input_quantizer''' ) or hasattr(lowerCAmelCase_ , '''_weight_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
set_quantizers(lowerCAmelCase_ , lowerCAmelCase_ , **lowerCAmelCase_ )
elif name.endswith('''_quantizer''' ):
for n in names:
if re.search(lowerCAmelCase_ , lowerCAmelCase_ ):
_snake_case : Any = f'''Warning: changing {name:{name_width}}'''
for k, v in kwargs.items():
s += f''' {k}={v}'''
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
logger.info(lowerCAmelCase_ )
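# Typical usage flow (illustrative only; the helper names below are the ones the
# original quant_trainer module exports, which the obfuscated defs above
# correspond to — treat them as assumptions):
#
#   configure_model(model, args, calib=True)  # attach quantizer descriptors
#   enable_calibration(model)                 # disable quant, collect histograms
#   for batch in calibration_batches:         # a few representative batches
#       model(**batch)
#   finish_calibration(model, args)           # load amax values, re-enable quant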
| 47 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
_a = pytest.mark.integration
@pytest.mark.parametrize('''path''' ,['''paws''', '''csv'''] )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Union[str, Any]:
'''simple docstring'''
inspect_dataset(__snake_case ,__snake_case )
lowerCamelCase__ = path + '''.py'''
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings('''ignore:inspect_metric is deprecated:FutureWarning''' )
@pytest.mark.filterwarnings('''ignore:metric_module_factory is deprecated:FutureWarning''' )
@pytest.mark.parametrize('''path''' ,['''accuracy'''] )
def lowerCAmelCase__(__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
inspect_metric(__snake_case ,__snake_case )
lowerCamelCase__ = path + '''.py'''
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
'''path, config_name, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
lowerCamelCase__ = get_dataset_config_info(__snake_case ,config_name=__snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[Any]:
'''simple docstring'''
with pytest.raises(__snake_case ):
get_dataset_config_info(__snake_case ,config_name=__snake_case )
@pytest.mark.parametrize(
'''path, expected''' ,[
('''squad''', '''plain_text'''),
('''acronym_identification''', '''default'''),
('''lhoestq/squad''', '''plain_text'''),
('''lhoestq/test''', '''default'''),
('''lhoestq/demo1''', '''lhoestq--demo1'''),
('''dalle-mini/wit''', '''dalle-mini--wit'''),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Any:
'''simple docstring'''
lowerCamelCase__ = get_dataset_config_names(__snake_case )
assert expected in config_names
@pytest.mark.parametrize(
'''path, expected_configs, expected_splits_in_first_config''' ,[
('''squad''', ['''plain_text'''], ['''train''', '''validation''']),
('''dalle-mini/wit''', ['''dalle-mini--wit'''], ['''train''']),
('''paws''', ['''labeled_final''', '''labeled_swap''', '''unlabeled_final'''], ['''train''', '''test''', '''validation''']),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> int:
'''simple docstring'''
lowerCamelCase__ = get_dataset_infos(__snake_case )
assert list(infos.keys() ) == expected_configs
lowerCamelCase__ = expected_configs[0]
assert expected_config in infos
lowerCamelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
'''path, expected_config, expected_splits''' ,[
('''squad''', '''plain_text''', ['''train''', '''validation''']),
('''dalle-mini/wit''', '''dalle-mini--wit''', ['''train''']),
('''paws''', '''labeled_final''', ['''train''', '''test''', '''validation''']),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> str:
'''simple docstring'''
lowerCamelCase__ = get_dataset_infos(__snake_case )
assert expected_config in infos
lowerCamelCase__ = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
'''path, config_name, expected_exception''' ,[
('''paws''', None, ValueError),
] ,)
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case ) -> List[str]:
'''simple docstring'''
with pytest.raises(__snake_case ):
get_dataset_split_names(__snake_case ,config_name=__snake_case )
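# Quick manual check (added for illustration) of the same inspection API the
# tests above exercise; the expected splits match the parametrizations above.
if __name__ == "__main__":
    print(get_dataset_split_names("squad"))  # ['train', 'validation']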
| 481 |
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def lowerCAmelCase__(__snake_case ) -> Dict:
'''simple docstring'''
if "img_encoder.pos_embed" in name:
lowerCamelCase__ = name.replace('''img_encoder.pos_embed''' ,'''vision_model.embeddings.position_embeddings''' )
if "img_encoder.patch_embed.proj" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.proj''' ,'''vision_model.embeddings.patch_embeddings.projection''' )
if "img_encoder.patch_embed.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.patch_embed.norm''' ,'''vision_model.embeddings.layernorm''' )
if "img_encoder.layers" in name:
lowerCamelCase__ = name.replace('''img_encoder.layers''' ,'''vision_model.encoder.stages''' )
if "blocks" in name and "res" not in name:
lowerCamelCase__ = name.replace('''blocks''' ,'''layers''' )
if "attn" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''attn''' ,'''self_attn''' )
if "proj" in name and "self_attn" in name and "text" not in name:
lowerCamelCase__ = name.replace('''proj''' ,'''out_proj''' )
if "pre_assign_attn.attn.proj" in name:
lowerCamelCase__ = name.replace('''pre_assign_attn.attn.proj''' ,'''pre_assign_attn.attn.out_proj''' )
if "norm1" in name:
lowerCamelCase__ = name.replace('''norm1''' ,'''layer_norm1''' )
if "norm2" in name and "pre_assign" not in name:
lowerCamelCase__ = name.replace('''norm2''' ,'''layer_norm2''' )
if "img_encoder.norm" in name:
lowerCamelCase__ = name.replace('''img_encoder.norm''' ,'''vision_model.layernorm''' )
# text encoder
if "text_encoder.token_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.token_embedding''' ,'''text_model.embeddings.token_embedding''' )
if "text_encoder.positional_embedding" in name:
lowerCamelCase__ = name.replace('''text_encoder.positional_embedding''' ,'''text_model.embeddings.position_embedding.weight''' )
if "text_encoder.transformer.resblocks." in name:
lowerCamelCase__ = name.replace('''text_encoder.transformer.resblocks.''' ,'''text_model.encoder.layers.''' )
if "ln_1" in name:
lowerCamelCase__ = name.replace('''ln_1''' ,'''layer_norm1''' )
if "ln_2" in name:
lowerCamelCase__ = name.replace('''ln_2''' ,'''layer_norm2''' )
if "c_fc" in name:
lowerCamelCase__ = name.replace('''c_fc''' ,'''fc1''' )
if "c_proj" in name:
lowerCamelCase__ = name.replace('''c_proj''' ,'''fc2''' )
if "text_encoder" in name:
lowerCamelCase__ = name.replace('''text_encoder''' ,'''text_model''' )
if "ln_final" in name:
lowerCamelCase__ = name.replace('''ln_final''' ,'''final_layer_norm''' )
# projection layers
if "img_projector.linear_hidden." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_hidden.''' ,'''visual_projection.''' )
if "img_projector.linear_out." in name:
lowerCamelCase__ = name.replace('''img_projector.linear_out.''' ,'''visual_projection.3.''' )
if "text_projector.linear_hidden" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_hidden''' ,'''text_projection''' )
if "text_projector.linear_out" in name:
lowerCamelCase__ = name.replace('''text_projector.linear_out''' ,'''text_projection.3''' )
return name
def lowerCAmelCase__(__snake_case ,__snake_case ) -> Optional[Any]:
'''simple docstring'''
for key in orig_state_dict.copy().keys():
lowerCamelCase__ = orig_state_dict.pop(__snake_case )
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ , lowerCamelCase__ = int(key_split[2] ), int(key_split[4] )
lowerCamelCase__ = config.vision_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[dim : dim * 2, :]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
lowerCamelCase__ = key.split('''.''' )
lowerCamelCase__ = int(key_split[3] )
lowerCamelCase__ = config.text_config.hidden_size
if "weight" in key:
lowerCamelCase__ = val[:dim, :]
lowerCamelCase__ = val[
dim : dim * 2, :
]
lowerCamelCase__ = val[-dim:, :]
else:
lowerCamelCase__ = val[:dim]
lowerCamelCase__ = val[dim : dim * 2]
lowerCamelCase__ = val[-dim:]
else:
lowerCamelCase__ = rename_key(__snake_case )
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
lowerCamelCase__ = val.squeeze_()
else:
lowerCamelCase__ = val
return orig_state_dict
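# Illustration (added, not part of the conversion script): the qkv handling above
# just slices a fused (3*dim, dim) projection into equal q, k and v blocks.
# `_demo_qkv_split` is a name invented for this sketch.
def _demo_qkv_split():
    dim = 4
    fused = torch.randn(3 * dim, dim)  # rows stacked as [q; k; v]
    q, k, v = fused[:dim, :], fused[dim : dim * 2, :], fused[-dim:, :]
    assert q.shape == k.shape == v.shape == (dim, dim)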
def lowerCAmelCase__() -> int:
'''simple docstring'''
lowerCamelCase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowerCamelCase__ = Image.open(requests.get(__snake_case ,stream=__snake_case ).raw )
return im
@torch.no_grad()
def lowerCAmelCase__(__snake_case ,__snake_case ,__snake_case="groupvit-gcc-yfcc" ,__snake_case=False ) -> Dict:
'''simple docstring'''
lowerCamelCase__ = GroupViTConfig()
lowerCamelCase__ = GroupViTModel(__snake_case ).eval()
lowerCamelCase__ = torch.load(__snake_case ,map_location='''cpu''' )['''model''']
lowerCamelCase__ = convert_state_dict(__snake_case ,__snake_case )
lowerCamelCase__ , lowerCamelCase__ = model.load_state_dict(__snake_case ,strict=__snake_case )
assert missing_keys == ["text_model.embeddings.position_ids"]
assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(__snake_case ) == 0)
# verify result
lowerCamelCase__ = CLIPProcessor.from_pretrained('''openai/clip-vit-base-patch32''' )
lowerCamelCase__ = prepare_img()
lowerCamelCase__ = processor(text=['''a photo of a cat''', '''a photo of a dog'''] ,images=__snake_case ,padding=__snake_case ,return_tensors='''pt''' )
with torch.no_grad():
lowerCamelCase__ = model(**__snake_case )
if model_name == "groupvit-gcc-yfcc":
lowerCamelCase__ = torch.tensor([[1_3.3_5_2_3, 6.3_6_2_9]] )
elif model_name == "groupvit-gcc-redcaps":
lowerCamelCase__ = torch.tensor([[1_6.1_8_7_3, 8.6_2_3_0]] )
else:
raise ValueError(F'Model name {model_name} not supported.' )
assert torch.allclose(outputs.logits_per_image ,__snake_case ,atol=1E-3 )
processor.save_pretrained(__snake_case )
model.save_pretrained(__snake_case )
print('''Successfully saved processor and model to''' ,__snake_case )
if push_to_hub:
print('''Pushing to the hub...''' )
processor.push_to_hub(__snake_case ,organization='''nielsr''' )
model.push_to_hub(__snake_case ,organization='''nielsr''' )
if __name__ == "__main__":
_a = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
_a = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 481 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
UpperCAmelCase_ = {"""LayoutLMv2Config""", """LayoutLMv3Config"""}
@is_pipeline_test
class UpperCamelCase_ ( unittest.TestCase ):
lowerCAmelCase_ = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase_ = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase_ = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase_ = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def lowerCAmelCase ( self ) -> Optional[int]:
_snake_case = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' )
_snake_case = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
_snake_case = text_classifier('This is great !' , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}] )
_snake_case = text_classifier(['This is great !', 'This is bad'] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
] , )
_snake_case = text_classifier('This is great !' , top_k=1 )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
# Legacy behavior
_snake_case = text_classifier('This is great !' , return_all_scores=lowerCAmelCase_ )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
_snake_case = text_classifier('This is great !' , return_all_scores=lowerCAmelCase_ )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}]] )
_snake_case = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase_ )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
[{'label': 'LABEL_0', 'score': 0.5_04}, {'label': 'LABEL_1', 'score': 0.4_96}],
] , )
_snake_case = text_classifier(['This is great !', 'Something else'] , return_all_scores=lowerCAmelCase_ )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [
{'label': 'LABEL_0', 'score': 0.5_04},
{'label': 'LABEL_0', 'score': 0.5_04},
] , )
@require_torch
def lowerCAmelCase ( self ) -> List[str]:
import torch
_snake_case = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='pt' , device=torch.device('cpu' ) , )
_snake_case = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@require_tf
def lowerCAmelCase ( self ) -> int:
_snake_case = pipeline(
task='text-classification' , model='hf-internal-testing/tiny-random-distilbert' , framework='tf' )
_snake_case = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'LABEL_0', 'score': 0.5_04}] )
@slow
@require_torch
def lowerCAmelCase ( self ) -> Union[str, Any]:
_snake_case = pipeline('text-classification' )
_snake_case = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
@slow
@require_tf
def lowerCAmelCase ( self ) -> List[Any]:
_snake_case = pipeline('text-classification' , framework='tf' )
_snake_case = text_classifier('This is great !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'POSITIVE', 'score': 1.0}] )
_snake_case = text_classifier('This is bad !' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'NEGATIVE', 'score': 1.0}] )
_snake_case = text_classifier('Birds are a type of animal' )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': 'POSITIVE', 'score': 0.9_88}] )
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
_snake_case = TextClassificationPipeline(model=lowerCAmelCase_ , tokenizer=lowerCAmelCase_ )
return text_classifier, ["HuggingFace is in", "This is another test"]
def lowerCAmelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
_snake_case = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
_snake_case = 'HuggingFace is in'
_snake_case = text_classifier(lowerCAmelCase_ )
self.assertEqual(nested_simplify(lowerCAmelCase_ ) , [{'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}] )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
_snake_case = ['HuggingFace is in ', 'Paris is in France']
_snake_case = text_classifier(lowerCAmelCase_ )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [{'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}, {'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
self.assertTrue(outputs[1]['label'] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
_snake_case = text_classifier(lowerCAmelCase_ , top_k=lowerCAmelCase_ )
_snake_case = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [[{'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}] * N, [{'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}] * N] , )
_snake_case = {'text': 'HuggingFace is in ', 'text_pair': 'Paris is in France'}
_snake_case = text_classifier(lowerCAmelCase_ )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , {'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )} , )
self.assertTrue(outputs['label'] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
_snake_case = [['HuggingFace is in ', 'Paris is in France']]
with self.assertRaises(lowerCAmelCase_ ):
text_classifier(lowerCAmelCase_ )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
_snake_case = text_classifier([[['HuggingFace is in ', 'Paris is in France']]] )
self.assertEqual(
nested_simplify(lowerCAmelCase_ ) , [{'label': ANY(lowerCAmelCase_ ), 'score': ANY(lowerCAmelCase_ )}] , )
self.assertTrue(outputs[0]['label'] in model.config.idalabel.values() )
| 541 |
import argparse
from collections import defaultdict
import yaml
UpperCAmelCase_ = """docs/source/en/_toctree.yml"""
def lowerCamelCase__ ( UpperCamelCase__ : Optional[Any] ) -> str:
'''simple docstring'''
_snake_case = defaultdict(UpperCamelCase__ )
_snake_case = []
_snake_case = []
for doc in doc_list:
if "local" in doc:
counts[doc["local"]] += 1
if doc["title"].lower() == "overview":
overview_doc.append({'local': doc['local'], 'title': doc['title']} )
else:
new_doc_list.append(UpperCamelCase__ )
_snake_case = new_doc_list
_snake_case = [key for key, value in counts.items() if value > 1]
_snake_case = []
for duplicate_key in duplicates:
_snake_case = list({doc['title'] for doc in doc_list if doc['local'] == duplicate_key} )
if len(UpperCamelCase__ ) > 1:
raise ValueError(
F'''{duplicate_key} is present several times in the documentation table of content at '''
'`docs/source/en/_toctree.yml` with different *Title* values. Choose one of those and remove the '
'others.' )
# Only add this once
new_doc.append({'local': duplicate_key, 'title': titles[0]} )
# Add none duplicate-keys
new_doc.extend([doc for doc in doc_list if 'local' not in counts or counts[doc['local']] == 1] )
_snake_case = sorted(UpperCamelCase__ , key=lambda UpperCamelCase__ : s["title"].lower() )
# "overview" gets special treatment and is always first
if len(UpperCamelCase__ ) > 1:
raise ValueError('The doc list has two \'overview\' docs, which is not allowed.' )
overview_doc.extend(UpperCamelCase__ )
# Sort
return overview_doc
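# Behavior sketch (added for clarity), assuming the function above keeps its
# original name clean_doc_toc:
#
#   clean_doc_toc([
#       {"local": "overview", "title": "Overview"},
#       {"local": "ddim", "title": "DDIM"},
#       {"local": "ddim", "title": "DDIM"},
#   ])
#   -> [{"local": "overview", "title": "Overview"}, {"local": "ddim", "title": "DDIM"}]
#
# Duplicate entries collapse to one, titles are sorted, and "Overview" always leads.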
def lowerCamelCase__ ( UpperCamelCase__ : Dict=False ) -> Optional[int]:
'''simple docstring'''
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
_snake_case = yaml.safe_load(f.read() )
# Get to the API doc
_snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_snake_case = content[api_idx]['sections']
# Then to the model doc
_snake_case = 0
while api_doc[scheduler_idx]["title"] != "Schedulers":
scheduler_idx += 1
_snake_case = api_doc[scheduler_idx]['sections']
_snake_case = clean_doc_toc(UpperCamelCase__ )
_snake_case = False
if new_scheduler_doc != scheduler_doc:
_snake_case = True
if overwrite:
_snake_case = new_scheduler_doc
if diff:
if overwrite:
_snake_case = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
def lowerCamelCase__ ( UpperCamelCase__ : Tuple=False ) -> List[Any]:
'''simple docstring'''
with open(UpperCamelCase__ , encoding='utf-8' ) as f:
_snake_case = yaml.safe_load(f.read() )
# Get to the API doc
_snake_case = 0
while content[api_idx]["title"] != "API":
api_idx += 1
_snake_case = content[api_idx]['sections']
# Then to the model doc
_snake_case = 0
while api_doc[pipeline_idx]["title"] != "Pipelines":
pipeline_idx += 1
_snake_case = False
_snake_case = api_doc[pipeline_idx]['sections']
_snake_case = []
# sort sub pipeline docs
for pipeline_doc in pipeline_docs:
if "section" in pipeline_doc:
_snake_case = pipeline_doc['section']
_snake_case = clean_doc_toc(UpperCamelCase__ )
if overwrite:
_snake_case = new_sub_pipeline_doc
new_pipeline_docs.append(UpperCamelCase__ )
# sort overall pipeline doc
_snake_case = clean_doc_toc(UpperCamelCase__ )
if new_pipeline_docs != pipeline_docs:
_snake_case = True
if overwrite:
_snake_case = new_pipeline_docs
if diff:
if overwrite:
_snake_case = api_doc
with open(UpperCamelCase__ , 'w' , encoding='utf-8' ) as f:
f.write(yaml.dump(UpperCamelCase__ , allow_unicode=UpperCamelCase__ ) )
else:
raise ValueError(
'The model doc part of the table of content is not properly sorted, run `make style` to fix this.' )
if __name__ == "__main__":
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
UpperCAmelCase_ = parser.parse_args()
check_scheduler_doc(args.fix_and_overwrite)
check_pipeline_doc(args.fix_and_overwrite)
| 541 | 1 |
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    # Builds the sample tree: 1 at the root, 2 and 3 below it, 4 and 5 under 2.
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree


def preorder(root: Node | None) -> list[int]:
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []


def postorder(root: Node | None) -> list[int]:
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []


def inorder(root: Node | None) -> list[int]:
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []


def height(root: Node | None) -> int:
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def level_order(root: Node | None) -> Sequence[Node | None]:
    """Breadth-first traversal returning node data level by level."""
    output: list[Any] = []
    if root is None:
        return output
    process_queue = deque([root])
    while process_queue:
        node = process_queue.popleft()
        output.append(node.data)
        if node.left:
            process_queue.append(node.left)
        if node.right:
            process_queue.append(node.right)
    return output


def get_nodes_from_left_to_right(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of every node on the given level, left to right."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if not root:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.left, level - 1)
            populate_output(root.right, level - 1)

    populate_output(root, level)
    return output


def get_nodes_from_right_to_left(root: Node | None, level: int) -> Sequence[Node | None]:
    """Collect the data of every node on the given level, right to left."""
    output: list[Any] = []

    def populate_output(root: Node | None, level: int) -> None:
        if root is None:
            return
        if level == 1:
            output.append(root.data)
        elif level > 1:
            populate_output(root.right, level - 1)
            populate_output(root.left, level - 1)

    populate_output(root, level)
    return output


def zigzag(root: Node | None) -> Sequence[Node | None] | list[Any]:
    """ZigZag traversal: alternate left-to-right and right-to-left per level."""
    if root is None:
        return []
    output: list[Sequence[Node | None]] = []
    flag = 0
    height_tree = height(root)
    for h in range(1, height_tree + 1):
        if not flag:
            output.append(get_nodes_from_left_to_right(root, h))
            flag = 1
        else:
            output.append(get_nodes_from_right_to_left(root, h))
            flag = 0
    return output
def main() -> None:  # Main function for testing.
    root = make_tree()
    print(f"In-order Traversal: {inorder(root)}")
    print(f"Pre-order Traversal: {preorder(root)}")
    print(f"Post-order Traversal: {postorder(root)}", "\n")
    print(f"Height of Tree: {height(root)}", "\n")
    print("Complete Level Order Traversal: ")
    print(level_order(root), "\n")
    print("Level-wise order Traversal: ")
    for level in range(1, height(root) + 1):
        print(f"Level {level}:", get_nodes_from_left_to_right(root, level=level))
    print("\nZigZag order Traversal: ")
    print(zigzag(root))
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 57 |
from math import atan, cos, radians, sin, tan
from .haversine_distance import haversine_distance
AXIS_A = 6_378_137.0
AXIS_B = 6_356_752.314_245
EQUATORIAL_RADIUS = 6_378_137
def lamberts_ellipsoidal_distance(
    lata: float, longa: float, latb: float, longb: float
) -> float:
    # Equation parameters:
    # https://en.wikipedia.org/wiki/Geographical_distance#Lambert's_formula_for_long_lines
    flattening = (AXIS_A - AXIS_B) / AXIS_A
    # Parametric latitudes
    # https://en.wikipedia.org/wiki/Latitude#Parametric_(or_reduced)_latitude
    b_lata = atan((1 - flattening) * tan(radians(lata)))
    b_latb = atan((1 - flattening) * tan(radians(latb)))

    # Compute the central angle between the two points,
    # using haversine theta: sigma = haversine_distance / equatorial radius
    sigma = haversine_distance(lata, longa, latb, longb) / EQUATORIAL_RADIUS

    # Intermediate P and Q values
    p_value = (b_lata + b_latb) / 2
    q_value = (b_latb - b_lata) / 2

    # Intermediate X value
    # X = (sigma - sin(sigma)) * sin^2(P) * cos^2(Q) / cos^2(sigma/2)
    x_numerator = (sin(p_value) ** 2) * (cos(q_value) ** 2)
    x_denominator = cos(sigma / 2) ** 2
    x_value = (sigma - sin(sigma)) * (x_numerator / x_denominator)

    # Intermediate Y value
    # Y = (sigma + sin(sigma)) * cos^2(P) * sin^2(Q) / sin^2(sigma/2)
    y_numerator = (cos(p_value) ** 2) * (sin(q_value) ** 2)
    y_denominator = sin(sigma / 2) ** 2
    y_value = (sigma + sin(sigma)) * (y_numerator / y_denominator)

    return EQUATORIAL_RADIUS * (sigma - ((flattening / 2) * (x_value + y_value)))
if __name__ == "__main__":
import doctest
doctest.testmod()
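    # Example (added for illustration, coordinates are my own choice): metres
    # between San Francisco (37.774856, -122.424227) and Yosemite
    # (37.864742, -119.537521) — roughly 254 km.
    print(lamberts_ellipsoidal_distance(37.774856, -122.424227, 37.864742, -119.537521))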
| 87 | 0 |
"""simple docstring"""
import tempfile
import unittest
from make_student import create_student_by_copying_alternating_layers
from transformers import AutoConfig
from transformers.file_utils import cached_property
from transformers.testing_utils import require_torch
A__ : Dict= """sshleifer/bart-tiny-random"""
A__ : Any= """patrickvonplaten/t5-tiny-random"""
@require_torch
class __lowerCamelCase ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE__ ( self ) -> List[str]:
return AutoConfig.from_pretrained(UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Dict:
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.num_hidden_layers , 1 )
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
def SCREAMING_SNAKE_CASE__ ( self ) -> Any:
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=UpperCamelCase__ )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , self.teacher_config.encoder_layers )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
UpperCamelCase__ = create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=1 , d=1 )
self.assertEqual(student.config.encoder_layers , 1 )
self.assertEqual(student.config.decoder_layers , 1 )
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
with self.assertRaises(UpperCamelCase__ ):
create_student_by_copying_alternating_layers(UpperCamelCase__ , tempfile.mkdtemp() , e=UpperCamelCase__ , d=UpperCamelCase__ )
| 708 |
"""simple docstring"""
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
A__ : Optional[Any]= """Run commands across TPU VMs for initial setup before running `accelerate launch`."""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE=None ) -> Dict:
"""simple docstring"""
if subparsers is not None:
UpperCamelCase__ = subparsers.add_parser('tpu-config' , description=_description )
else:
UpperCamelCase__ = argparse.ArgumentParser('Accelerate tpu-config command' , description=_description )
# Core arguments
UpperCamelCase__ = parser.add_argument_group(
'Config Arguments' , 'Arguments that can be configured through `accelerate config`.' )
config_args.add_argument(
'--config_file' , type=SCREAMING_SNAKE_CASE , default=SCREAMING_SNAKE_CASE , help='Path to the config file to use for accelerate.' , )
config_args.add_argument(
'--tpu_name' , default=SCREAMING_SNAKE_CASE , help='The name of the TPU to use. If not specified, will use the TPU specified in the config file.' , )
config_args.add_argument(
'--tpu_zone' , default=SCREAMING_SNAKE_CASE , help='The zone of the TPU to use. If not specified, will use the zone specified in the config file.' , )
UpperCamelCase__ = parser.add_argument_group('TPU Arguments' , 'Arguments for options ran inside the TPU.' )
pod_args.add_argument(
'--use_alpha' , action='store_true' , help='Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.' , )
pod_args.add_argument(
'--command_file' , default=SCREAMING_SNAKE_CASE , help='The path to the file containing the commands to run on the pod on startup.' , )
pod_args.add_argument(
'--command' , action='append' , nargs='+' , help='A command to run on the pod. Can be passed multiple times.' , )
pod_args.add_argument(
'--install_accelerate' , action='store_true' , help='Whether to install accelerate on the pod. Defaults to False.' , )
pod_args.add_argument(
'--accelerate_version' , default='latest' , help='The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify \'dev\' to install from GitHub.' , )
pod_args.add_argument(
'--debug' , action='store_true' , help='If set, will print the command that would be run instead of running it.' )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE )
return parser
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> Dict:
"""simple docstring"""
UpperCamelCase__ = None
# Get the default from the config file if it exists.
if args.config_file is not None or os.path.isfile(SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = load_config_from_file(args.config_file )
if not args.command_file and defaults.command_file is not None and not args.command:
UpperCamelCase__ = defaults.command_file
if not args.command and defaults.commands is not None:
UpperCamelCase__ = defaults.commands
if not args.tpu_name:
UpperCamelCase__ = defaults.tpu_name
if not args.tpu_zone:
UpperCamelCase__ = defaults.tpu_zone
if args.accelerate_version == "dev":
UpperCamelCase__ = 'git+https://github.com/huggingface/accelerate.git'
elif args.accelerate_version == "latest":
UpperCamelCase__ = 'accelerate -U'
elif isinstance(parse(args.accelerate_version ) , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = F'accelerate=={args.accelerate_version}'
if not args.command_file and not args.command:
raise ValueError('You must specify either a command file or a command to run on the pod.' )
if args.command_file:
with open(args.command_file , 'r' ) as f:
UpperCamelCase__ = [f.read().splitlines()]
# To turn list of lists into list of strings
if isinstance(args.command[0] , SCREAMING_SNAKE_CASE ):
UpperCamelCase__ = [line for cmd in args.command for line in cmd]
# Default to the shared folder and install accelerate
UpperCamelCase__ = ['cd /usr/share']
if args.install_accelerate:
new_cmd += [F'pip install {args.accelerate_version}']
new_cmd += args.command
UpperCamelCase__ = '; '.join(SCREAMING_SNAKE_CASE )
# Then send it to gcloud
# Eventually try to use google-api-core to do this instead of subprocess
UpperCamelCase__ = ['gcloud']
if args.use_alpha:
cmd += ["alpha"]
cmd += [
"compute",
"tpus",
"tpu-vm",
"ssh",
args.tpu_name,
"--zone",
args.tpu_zone,
"--command",
args.command,
"--worker",
"all",
]
if args.debug:
print(F'Running {" ".join(SCREAMING_SNAKE_CASE )}' )
return
subprocess.run(SCREAMING_SNAKE_CASE )
print('Successfully setup pod.' )
def lowerCAmelCase_( ) -> int:
"""simple docstring"""
UpperCamelCase__ = tpu_command_parser()
UpperCamelCase__ = parser.parse_args()
tpu_command_launcher(SCREAMING_SNAKE_CASE )
| 20 | 0 |