from __future__ import annotations

import math

import numpy as np
from numpy.linalg import norm


def euclidean(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the Euclidean distance between two vectors."""
    return math.sqrt(sum(pow(a - b, 2) for a, b in zip(input_a, input_b)))


def similarity_search(
    dataset: np.ndarray, value_array: np.ndarray
) -> list[list[list[float] | float]]:
    """For each vector in value_array, find its nearest neighbour in dataset."""
    if dataset.ndim != value_array.ndim:
        msg = (
            "Wrong input data's dimensions... "
            f"dataset : {dataset.ndim}, value_array : {value_array.ndim}"
        )
        raise ValueError(msg)

    try:
        if dataset.shape[1] != value_array.shape[1]:
            msg = (
                "Wrong input data's shape... "
                f"dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}"
            )
            raise ValueError(msg)
    except IndexError:
        if dataset.ndim != value_array.ndim:
            raise TypeError("Wrong shape")

    if dataset.dtype != value_array.dtype:
        msg = (
            "Input data have different datatype... "
            f"dataset : {dataset.dtype}, value_array : {value_array.dtype}"
        )
        raise TypeError(msg)

    answer = []
    for value in value_array:
        dist = euclidean(value, dataset[0])
        vector = dataset[0].tolist()
        for dataset_value in dataset[1:]:
            temp_dist = euclidean(value, dataset_value)
            # Keep the closest vector seen so far
            if dist > temp_dist:
                dist = temp_dist
                vector = dataset_value.tolist()
        answer.append([vector, dist])

    return answer


def cosine_similarity(input_a: np.ndarray, input_b: np.ndarray) -> float:
    """Return the cosine similarity of two vectors."""
    return np.dot(input_a, input_b) / (norm(input_a) * norm(input_b))


if __name__ == "__main__":
    import doctest

    doctest.testmod()

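A minimal usage sketch for the helpers above; the sample arrays are illustrative and not part of the original module:

import numpy as np

dataset = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
value_array = np.array([[0.0, 1.0]])

# Nearest neighbour of [0, 1] (ties keep the first match): [[[0.0, 0.0], 1.0]]
print(similarity_search(dataset, value_array))

# Parallel vectors have cosine similarity 1.0
print(cosine_similarity(np.array([1.0, 2.0]), np.array([2.0, 4.0])))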
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging


logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}


class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer. "
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4

import torch
from transformers import PreTrainedModel, XLMRobertaConfig, XLMRobertaModel


class MCLIPConfig(XLMRobertaConfig):
    model_type = "M-CLIP"

    def __init__(self, transformerDimSize=1024, imageDimSize=768, **kwargs):
        self.transformerDimensions = transformerDimSize
        self.numDims = imageDimSize
        super().__init__(**kwargs)


class MultilingualCLIP(PreTrainedModel):
    config_class = MCLIPConfig

    def __init__(self, config, *args, **kwargs):
        super().__init__(config, *args, **kwargs)
        self.transformer = XLMRobertaModel(config)
        self.LinearTransformation = torch.nn.Linear(
            in_features=config.transformerDimensions, out_features=config.numDims
        )

    def forward(self, input_ids, attention_mask):
        embs = self.transformer(input_ids=input_ids, attention_mask=attention_mask)[0]
        # Mean-pool token embeddings, masking out padding positions
        embs2 = (embs * attention_mask.unsqueeze(2)).sum(dim=1) / attention_mask.sum(dim=1)[:, None]
        return self.LinearTransformation(embs2), embs

import os
import warnings
from typing import List, Optional

from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig


logger = logging.get_logger(__name__)


class RagTokenizer:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = self.question_encoder

    def save_pretrained(self, save_directory):
        if os.path.isfile(save_directory):
            raise ValueError(f"Provided path ({save_directory}) should be a directory, not a file")
        os.makedirs(save_directory, exist_ok=True)
        question_encoder_path = os.path.join(save_directory, "question_encoder_tokenizer")
        generator_path = os.path.join(save_directory, "generator_tokenizer")
        self.question_encoder.save_pretrained(question_encoder_path)
        self.generator.save_pretrained(generator_path)

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        # dynamically import AutoTokenizer
        from ..auto.tokenization_auto import AutoTokenizer

        config = kwargs.pop("config", None)

        if config is None:
            config = RagConfig.from_pretrained(pretrained_model_name_or_path)

        question_encoder = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.question_encoder, subfolder="question_encoder_tokenizer"
        )
        generator = AutoTokenizer.from_pretrained(
            pretrained_model_name_or_path, config=config.generator, subfolder="generator_tokenizer"
        )
        return cls(question_encoder=question_encoder, generator=generator)

    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)

    def batch_decode(self, *args, **kwargs):
        return self.generator.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.generator.decode(*args, **kwargs)

    def _switch_to_input_mode(self):
        self.current_tokenizer = self.question_encoder

    def _switch_to_target_mode(self):
        self.current_tokenizer = self.generator

    def prepare_seq2seq_batch(
        self,
        src_texts: List[str],
        tgt_texts: Optional[List[str]] = None,
        max_length: Optional[int] = None,
        max_target_length: Optional[int] = None,
        padding: str = "longest",
        return_tensors: str = None,
        truncation: bool = True,
        **kwargs,
    ) -> BatchEncoding:
        warnings.warn(
            "`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the "
            "regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` "
            "context manager to prepare your targets. See the documentation of your specific tokenizer for more "
            "details",
            FutureWarning,
        )
        if max_length is None:
            max_length = self.current_tokenizer.model_max_length
        model_inputs = self(
            src_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            max_length=max_length,
            padding=padding,
            truncation=truncation,
            **kwargs,
        )
        if tgt_texts is None:
            return model_inputs
        # Process tgt_texts
        if max_target_length is None:
            max_target_length = self.current_tokenizer.model_max_length
        labels = self(
            text_target=tgt_texts,
            add_special_tokens=True,
            return_tensors=return_tensors,
            padding=padding,
            max_length=max_target_length,
            truncation=truncation,
            **kwargs,
        )
        model_inputs["labels"] = labels["input_ids"]
        return model_inputs

import logging
import os
import threading
import time

try:
    import warnings
except ImportError:
    warnings = None

try:
    import msvcrt
except ImportError:
    msvcrt = None

try:
    import fcntl
except ImportError:
    fcntl = None


# Backward compatibility
# ------------------------------------------------
try:
    TimeoutError
except NameError:
    TimeoutError = OSError


# Data
# ------------------------------------------------
__all__ = [
    "Timeout",
    "BaseFileLock",
    "WindowsFileLock",
    "UnixFileLock",
    "SoftFileLock",
    "FileLock",
]

__version__ = "3.0.12"


_logger = None


def logger():
    """Returns the logger instance used in this module."""
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        #: The path of the file lock.
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = f"The file lock '{self.lock_file}' could not be acquired."
        return temp


class _Acquire_ReturnProxy:
    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None


class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file

        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None

        # The default timeout value.
        self.timeout = timeout

        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()

        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, value):
        self._timeout = float(value)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None

    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout

        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1

        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(f"Attempting to acquire lock {lock_id} on {lock_filename}")
                        self._acquire()

                if self.is_locked:
                    logger().debug(f"Lock {lock_id} acquired on {lock_filename}")
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(f"Timeout on acquiring lock {lock_id} on {lock_filename}")
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        f"Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ..."
                    )
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1

                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file

                    logger().debug(f"Attempting to release lock {lock_id} on {lock_filename}")
                    self._release()
                    self._lock_counter = 0
                    logger().debug(f"Lock {lock_id} released on {lock_filename}")
        return None

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.release()
        return None

    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path: str, max_length: int) -> str:
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + "..." + hashed_filename + ".lock"
            return os.path.join(dirname, new_filename)
        else:
            return path


class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = "\\\\?\\" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC

        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)

        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None


class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` to hard lock the lock file on unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)

        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        #   https://github.com/benediktschmitt/py-filelock/issues/31
        #   https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None


class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None

        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None


FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn("only soft file lock is available")

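A short usage sketch for the lock API above, assuming the module is importable in its package (the lock-file path is illustrative):

lock = FileLock("high_ground.txt.lock", timeout=5)

# Both context-manager styles work; acquire() returns a proxy object:
with lock:
    pass  # exclusive access to the guarded resource

with lock.acquire(timeout=10):
    pass  # a per-call timeout overrides the instance default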
from .configuration_bert_masked import MaskedBertConfig
from .modeling_bert_masked import (
MaskedBertForMultipleChoice,
MaskedBertForQuestionAnswering,
MaskedBertForSequenceClassification,
MaskedBertForTokenClassification,
MaskedBertModel,
)
from .modules import *
import itertools
import json
import os
import unittest

from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class RobertaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RobertaTokenizer
    rust_tokenizer_class = RobertaTokenizerFast
    test_rust_tokenizer = True
    from_pretrained_kwargs = {"cls_token": "<s>"}

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return RobertaTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "lower newer"
        bpe_tokens = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
        tokens = tokenizer.tokenize(text)  # , add_prefix_space=True)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    def roberta_dict_integration_testing(self):
        tokenizer = self.get_tokenizer()

        self.assertListEqual(tokenizer.encode("Hello world!", add_special_tokens=False), [0, 31414, 232, 328, 2])
        self.assertListEqual(
            tokenizer.encode("Hello world! cécé herlolip 418", add_special_tokens=False),
            [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2],
        )

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("roberta-base")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_text_from_decode = tokenizer.encode(
            "sequence builders", add_special_tokens=True, add_prefix_space=False
        )
        encoded_pair_from_decode = tokenizer.encode(
            "sequence builders", "multi-sequence build", add_special_tokens=True, add_prefix_space=False
        )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == encoded_text_from_decode
        assert encoded_pair == encoded_pair_from_decode

    def test_space_encoding(self):
        tokenizer = self.get_tokenizer()

        sequence = "Encode this sequence."
        space_encoding = tokenizer.byte_encoder[" ".encode("utf-8")[0]]

        # Testing encoder arguments
        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=False)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertNotEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence, add_special_tokens=False, add_prefix_space=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[0])[0]
        self.assertEqual(first_char, space_encoding)

        tokenizer.add_special_tokens({"bos_token": "<s>"})
        encoded = tokenizer.encode(sequence, add_special_tokens=True)
        first_char = tokenizer.convert_ids_to_tokens(encoded[1])[0]
        self.assertNotEqual(first_char, space_encoding)

        # Testing spaces after special tokens
        mask = "<mask>"
        tokenizer.add_special_tokens(
            {"mask_token": AddedToken(mask, lstrip=True, rstrip=False)}
        )  # mask token has a left space
        mask_ind = tokenizer.convert_tokens_to_ids(mask)

        sequence = "Encode <mask> sequence"
        sequence_nospace = "Encode <mask>sequence"

        encoded = tokenizer.encode(sequence)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertEqual(first_char, space_encoding)

        encoded = tokenizer.encode(sequence_nospace)
        mask_loc = encoded.index(mask_ind)
        first_char = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1])[0]
        self.assertNotEqual(first_char, space_encoding)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                # token_type_ids should put 0 everywhere
                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))

                # attention_mask should put 1 everywhere, so sum over length should be 1
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])

                # Rust correctly handles the space before the mask while python doesnt
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )

    def test_change_add_prefix_space_and_trim_offsets_args(self):
        for trim_offsets, add_prefix_space in itertools.product([True, False], repeat=2):
            tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                self.tmpdirname, use_fast=True, add_prefix_space=add_prefix_space, trim_offsets=trim_offsets
            )

            pre_tokenizer_state = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__())
            post_processor_state = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__())

            self.assertEqual(pre_tokenizer_state["add_prefix_space"], add_prefix_space)

            self.assertEqual(post_processor_state["add_prefix_space"], add_prefix_space)
            self.assertEqual(post_processor_state["trim_offsets"], trim_offsets)

    def test_offsets_mapping_with_different_add_prefix_space_and_trim_space_arguments(self):
        # Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
        # `trim_offsets`
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                text_of_1_token = "hello"  # `hello` is a token in the vocabulary of `pretrained_name`
                text = f"{text_of_1_token} {text_of_1_token}"

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token) + 1, len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (len(text_of_1_token), len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                text = f" {text}"

                # tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                #     pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
                # )
                # encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                # self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                # self.assertEqual(
                #     encoding.offset_mapping[1],
                #     (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                # )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=True
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

                tokenizer_r = self.rust_tokenizer_class.from_pretrained(
                    pretrained_name, use_fast=True, add_prefix_space=False, trim_offsets=False
                )
                encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
                self.assertEqual(encoding.offset_mapping[0], (0, 1 + len(text_of_1_token)))
                self.assertEqual(
                    encoding.offset_mapping[1],
                    (1 + len(text_of_1_token), 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
                )

from random import randint, random


def construct_highway(
    number_of_cells: int,
    frequency: int,
    initial_speed: int,
    random_frequency: bool = False,
    random_speed: bool = False,
    max_speed: int = 5,
) -> list:
    """Build the highway following the parameters given."""
    highway = [[-1] * number_of_cells]  # Create a highway without any car
    i = 0
    initial_speed = max(initial_speed, 0)
    while i < number_of_cells:
        highway[0][i] = (
            randint(0, max_speed) if random_speed else initial_speed
        )  # Place the cars
        i += (
            randint(1, max_speed * 2) if random_frequency else frequency
        )  # Arbitrary number, may need tuning
    return highway


def get_distance(highway_now: list, car_index: int) -> int:
    """Get the distance between a car and the next car ahead of it."""
    distance = 0
    cells = highway_now[car_index + 1 :]
    for cell in range(len(cells)):  # May need a better name for this
        if cells[cell] != -1:  # If the cell is not empty then
            return distance  # we have the distance we wanted
        distance += 1
    # Here if the car is near the end of the highway
    return distance + get_distance(highway_now, -1)


def update(highway_now: list, probability: float, max_speed: int) -> list:
    """Update the speed of the cars for one time step."""
    number_of_cells = len(highway_now)
    # Before calculations, the highway is empty
    next_highway = [-1] * number_of_cells

    for car_index in range(number_of_cells):
        if highway_now[car_index] != -1:
            # Add 1 to the current speed of the car and cap the speed
            next_highway[car_index] = min(highway_now[car_index] + 1, max_speed)
            # Number of empty cell before the next car
            dn = get_distance(highway_now, car_index) - 1
            # We can't have the car causing an accident
            next_highway[car_index] = min(next_highway[car_index], dn)
            if random() < probability:
                # Randomly, a driver will slow down
                next_highway[car_index] = max(next_highway[car_index] - 1, 0)
    return next_highway


def simulate(highway: list, number_of_update: int, probability: float, max_speed: int) -> list:
    """Simulate the evolution of the highway for the given number of steps."""
    number_of_cells = len(highway[0])

    for i in range(number_of_update):
        next_speeds_calculated = update(highway[i], probability, max_speed)
        real_next_speeds = [-1] * number_of_cells

        for car_index in range(number_of_cells):
            speed = next_speeds_calculated[car_index]
            if speed != -1:
                # Change the position based on the speed (with % to create the loop)
                index = (car_index + speed) % number_of_cells
                # Commit the change of position
                real_next_speeds[index] = speed
        highway.append(real_next_speeds)

    return highway


if __name__ == "__main__":
    import doctest

    doctest.testmod()

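An illustrative run of the Nagel-Schreckenberg simulation above; the parameter values are arbitrary:

highway = construct_highway(number_of_cells=21, frequency=4, initial_speed=2)
for state in simulate(highway, number_of_update=5, probability=0.3, max_speed=5):
    print(state)  # one row per time step; -1 marks an empty cell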
import unittest

import numpy as np

from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask


if is_flax_available():
    from transformers.models.roberta.modeling_flax_roberta import (
        FlaxRobertaForCausalLM,
        FlaxRobertaForMaskedLM,
        FlaxRobertaForMultipleChoice,
        FlaxRobertaForQuestionAnswering,
        FlaxRobertaForSequenceClassification,
        FlaxRobertaForTokenClassification,
        FlaxRobertaModel,
    )


class FlaxRobertaModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )


@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    test_head_masking = True

    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)

def solution(length: int = 50) -> int:
    """Returns the number of ways a row of the given length can be filled."""
    ways_number = [1] * (length + 1)

    for row_length in range(3, length + 1):
        for block_length in range(3, row_length + 1):
            for block_start in range(row_length - block_length):
                ways_number[row_length] += ways_number[
                    row_length - block_start - block_length - 1
                ]

            ways_number[row_length] += 1

    return ways_number[length]


if __name__ == "__main__":
    print(f"{solution() = }")

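A quick check, assuming this is Project Euler problem 114 (red blocks of length at least three separated by black squares), whose statement gives seventeen arrangements for a row of length seven:

print(solution(7))  # 17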
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_deformable_detr import DeformableDetrImageProcessor
lowercase__ = logging.get_logger(__name__)
class lowerCAmelCase__ ( lowercase ):
'''simple docstring'''
def __init__( self , *lowercase , **lowercase ):
warnings.warn(
'The class DeformableDetrFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use DeformableDetrImageProcessor instead.' , lowercase , )
super().__init__(*lowercase , **lowercase ) | 492 |
"""simple docstring"""
from __future__ import annotations
lowercase__ = 10
def _snake_case ( lowercase__ ):
_lowerCamelCase : List[Any] = 1
_lowerCamelCase : Optional[int] = max(lowercase__ )
while placement <= max_digit:
# declare and initialize empty buckets
_lowerCamelCase : list[list] = [[] for _ in range(lowercase__ )]
# split list_of_ints between the buckets
for i in list_of_ints:
_lowerCamelCase : Dict = int((i / placement) % RADIX )
buckets[tmp].append(lowercase__ )
# put each buckets' contents into list_of_ints
_lowerCamelCase : str = 0
for b in range(lowercase__ ):
for i in buckets[b]:
_lowerCamelCase : str = i
a += 1
# move to next
placement *= RADIX
return list_of_ints
if __name__ == "__main__":
import doctest
doctest.testmod() | 492 | 1 |
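A usage sketch; note that the sort as written expects non-negative integers and mutates its argument in place:

print(radix_sort([170, 45, 75, 90, 2, 802, 24, 66]))
# [2, 24, 45, 66, 75, 90, 170, 802]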
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)


class MaskFormerSwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "maskformer-swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )

import sys

from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core


# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers

pkgs_to_check_at_runtime = "python tqdm regex requests packaging filelock numpy tokenizers".split()
if sys.version_info < (3, 7):
    pkgs_to_check_at_runtime.append("dataclasses")
if sys.version_info < (3, 8):
    pkgs_to_check_at_runtime.append("importlib_metadata")

for pkg in pkgs_to_check_at_runtime:
    if pkg in deps:
        if pkg == "tokenizers":
            # must be loaded here, or else tqdm check may fail
            from .utils import is_tokenizers_available

            if not is_tokenizers_available():
                continue  # not required, check version only if installed
        require_version_core(deps[pkg])
    else:
        raise ValueError(f"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")


def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)

"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from argparse import ArgumentParser
from accelerate.commands.config import get_config_parser
from accelerate.commands.env import env_command_parser
from accelerate.commands.launch import launch_command_parser
from accelerate.commands.test import test_command_parser
from accelerate.commands.tpu import tpu_command_parser
def main():
    parser = ArgumentParser("Accelerate CLI tool", usage="accelerate <command> [<args>]", allow_abbrev=False)
    subparsers = parser.add_subparsers(help="accelerate command helpers")

    # Register commands
    get_config_parser(subparsers=subparsers)
    env_command_parser(subparsers=subparsers)
    launch_command_parser(subparsers=subparsers)
    tpu_command_parser(subparsers=subparsers)
    test_command_parser(subparsers=subparsers)

    # Let's go
    args = parser.parse_args()

    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    args.func(args)


if __name__ == "__main__":
    main()

"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
A: Union[str, Any] = {
"configuration_speecht5": [
"SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP",
"SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP",
"SpeechT5Config",
"SpeechT5HifiGanConfig",
],
"feature_extraction_speecht5": ["SpeechT5FeatureExtractor"],
"processing_speecht5": ["SpeechT5Processor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: List[str] = ["SpeechT5Tokenizer"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A: Dict = [
"SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST",
"SpeechT5ForSpeechToText",
"SpeechT5ForSpeechToSpeech",
"SpeechT5ForTextToSpeech",
"SpeechT5Model",
"SpeechT5PreTrainedModel",
"SpeechT5HifiGan",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
A: Dict = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 359 | 0 |
import math
from collections.abc import Callable


def intersection(function: Callable[[float], float], x0: float, x1: float) -> float:
    """Find a root of `function` with the secant method, starting from x0 and x1."""
    x_n: float = x0
    x_n1: float = x1
    while True:
        if x_n == x_n1 or function(x_n1) == function(x_n):
            raise ZeroDivisionError("float division by zero, could not find root")
        # Secant update: x_{n+2} = x_{n+1} - f(x_{n+1}) / slope of the secant line
        x_n2: float = x_n1 - (
            function(x_n1) / ((function(x_n1) - function(x_n)) / (x_n1 - x_n))
        )
        if abs(x_n2 - x_n1) < 10**-5:
            return x_n2
        x_n = x_n1
        x_n1 = x_n2


def f(x: float) -> float:
    return math.pow(x, 3) - (2 * x) - 5


if __name__ == "__main__":
    print(intersection(f, 3, 3.5))

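The method works for any scalar function; an illustrative call (not part of the original module) finds the root of cos(x) - x near 0.739:

import math

print(intersection(lambda x: math.cos(x) - x, 0.0, 1.0))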
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
_UpperCamelCase = logging.get_logger(__name__)
_UpperCamelCase = {
'Helsinki-NLP/opus-mt-en-de': 'https://huggingface.co/Helsinki-NLP/opus-mt-en-de/resolve/main/config.json',
# See all Marian models at https://huggingface.co/models?filter=marian
}
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
__snake_case : Tuple = """marian"""
__snake_case : Any = ["""past_key_values"""]
__snake_case : Optional[Any] = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self :List[Any] , __lowercase :Any=5_8101 , __lowercase :Tuple=None , __lowercase :Union[str, Any]=1024 , __lowercase :Dict=12 , __lowercase :int=4096 , __lowercase :int=16 , __lowercase :List[Any]=12 , __lowercase :Dict=4096 , __lowercase :Dict=16 , __lowercase :Tuple=0.0 , __lowercase :Tuple=0.0 , __lowercase :List[Any]=True , __lowercase :int=True , __lowercase :Tuple="gelu" , __lowercase :str=1024 , __lowercase :Optional[int]=0.1 , __lowercase :List[str]=0.0 , __lowercase :Union[str, Any]=0.0 , __lowercase :Dict=0.02 , __lowercase :Tuple=5_8100 , __lowercase :Optional[Any]=False , __lowercase :int=5_8100 , __lowercase :Any=0 , __lowercase :str=0 , __lowercase :str=True , **__lowercase :Any , ):
__lowerCamelCase : List[Any] =vocab_size
__lowerCamelCase : Optional[int] =decoder_vocab_size or vocab_size
__lowerCamelCase : Tuple =max_position_embeddings
__lowerCamelCase : List[Any] =d_model
__lowerCamelCase : Any =encoder_ffn_dim
__lowerCamelCase : str =encoder_layers
__lowerCamelCase : List[str] =encoder_attention_heads
__lowerCamelCase : str =decoder_ffn_dim
__lowerCamelCase : Tuple =decoder_layers
__lowerCamelCase : Any =decoder_attention_heads
__lowerCamelCase : List[Any] =dropout
__lowerCamelCase : Any =attention_dropout
__lowerCamelCase : Union[str, Any] =activation_dropout
__lowerCamelCase : Optional[int] =activation_function
__lowerCamelCase : Dict =init_std
__lowerCamelCase : List[Any] =encoder_layerdrop
__lowerCamelCase : Optional[Any] =decoder_layerdrop
__lowerCamelCase : Any =use_cache
__lowerCamelCase : Any =encoder_layers
__lowerCamelCase : Optional[int] =scale_embedding # scale factor will be sqrt(d_model) if True
__lowerCamelCase : int =share_encoder_decoder_embeddings
super().__init__(
pad_token_id=__lowercase , eos_token_id=__lowercase , is_encoder_decoder=__lowercase , decoder_start_token_id=__lowercase , forced_eos_token_id=__lowercase , **__lowercase , )
class SCREAMING_SNAKE_CASE_ ( snake_case__ ):
"""simple docstring"""
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.inputs
def __lowercase ( self :int ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowerCamelCase : Optional[Any] ={0: '''batch'''}
__lowerCamelCase : Any ={0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
__lowerCamelCase : Optional[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
__lowerCamelCase : List[Any] ={0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(__lowercase , direction='''inputs''' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCamelCase : Any =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
] )
if self.use_past:
__lowerCamelCase , __lowerCamelCase : str =self.num_layers
for i in range(__lowercase ):
__lowerCamelCase : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
__lowerCamelCase : List[str] ={0: '''batch''', 2: '''past_sequence + sequence'''}
else:
__lowerCamelCase : List[str] =OrderedDict(
[
('''input_ids''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''attention_mask''', {0: '''batch''', 1: '''encoder_sequence'''}),
('''decoder_input_ids''', {0: '''batch''', 1: '''decoder_sequence'''}),
('''decoder_attention_mask''', {0: '''batch''', 1: '''decoder_sequence'''}),
] )
return common_inputs
@property
# Copied from transformers.models.bart.configuration_bart.BartOnnxConfig.outputs
def __lowercase ( self :Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : Tuple =super().outputs
else:
__lowerCamelCase : List[str] =super(__lowercase , self ).outputs
if self.use_past:
__lowerCamelCase , __lowerCamelCase : int =self.num_layers
for i in range(__lowercase ):
__lowerCamelCase : Any ={0: '''batch''', 2: '''past_sequence + sequence'''}
__lowerCamelCase : Optional[int] ={0: '''batch''', 2: '''past_sequence + sequence'''}
return common_outputs
def __lowercase ( self :str , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
__lowerCamelCase : List[str] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
# Generate decoder inputs
__lowerCamelCase : Optional[Any] =seq_length if not self.use_past else 1
__lowerCamelCase : List[Any] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
__lowerCamelCase : Dict ={f'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__lowerCamelCase : str =dict(**__lowercase , **__lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowerCamelCase , __lowerCamelCase : int =common_inputs['''input_ids'''].shape
__lowerCamelCase : Optional[Any] =common_inputs['''decoder_input_ids'''].shape[1]
__lowerCamelCase , __lowerCamelCase : Optional[int] =self.num_attention_heads
__lowerCamelCase : Any =(
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Dict =decoder_seq_length + 3
__lowerCamelCase : Tuple =(
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCamelCase : Optional[int] =torch.cat(
[common_inputs['''decoder_attention_mask'''], torch.ones(__lowercase , __lowercase )] , dim=1 )
__lowerCamelCase : Any =[]
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCamelCase , __lowerCamelCase : str =self.num_layers
__lowerCamelCase : List[Any] =min(__lowercase , __lowercase )
__lowerCamelCase : int =max(__lowercase , __lowercase ) - min_num_layers
__lowerCamelCase : Any ='''encoder''' if num_encoder_layers > num_decoder_layers else '''decoder'''
for _ in range(__lowercase ):
common_inputs["past_key_values"].append(
(
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
torch.zeros(__lowercase ),
) )
# TODO: test this.
__lowerCamelCase : Dict =encoder_shape if remaining_side_name == '''encoder''' else decoder_shape
for _ in range(__lowercase , __lowercase ):
common_inputs["past_key_values"].append((torch.zeros(__lowercase ), torch.zeros(__lowercase )) )
return common_inputs
def __lowercase ( self :Dict , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
__lowerCamelCase : List[str] =self._generate_dummy_inputs_for_encoder_and_decoder(
__lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
if self.use_past:
if not is_torch_available():
raise ValueError('''Cannot generate dummy past_keys inputs without PyTorch installed.''' )
else:
import torch
__lowerCamelCase , __lowerCamelCase : int =common_inputs['''input_ids'''].shape
# Not using the same length for past_key_values
__lowerCamelCase : str =seqlen + 2
__lowerCamelCase , __lowerCamelCase : List[str] =self.num_layers
__lowerCamelCase , __lowerCamelCase : Optional[Any] =self.num_attention_heads
__lowerCamelCase : List[str] =(
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCamelCase : Any =common_inputs['''attention_mask'''].dtype
__lowerCamelCase : Optional[int] =torch.cat(
[common_inputs['''attention_mask'''], torch.ones(__lowercase , __lowercase , dtype=__lowercase )] , dim=1 )
__lowerCamelCase : List[str] =[
(torch.zeros(__lowercase ), torch.zeros(__lowercase )) for _ in range(__lowercase )
]
return common_inputs
def __lowercase ( self :int , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCamelCase : Union[str, Any] =compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCamelCase : Optional[int] =tokenizer.num_special_tokens_to_add(__lowercase )
__lowerCamelCase : Optional[int] =compute_effective_axis_dimension(
__lowercase , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__lowercase )
# Generate dummy inputs according to compute batch and sequence
__lowerCamelCase : Any =[''' '''.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCamelCase : Union[str, Any] =dict(tokenizer(__lowercase , return_tensors=__lowercase ) )
return common_inputs
def __lowercase ( self :Tuple , __lowercase :PreTrainedTokenizer , __lowercase :int = -1 , __lowercase :int = -1 , __lowercase :bool = False , __lowercase :Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : int =self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
else:
__lowerCamelCase : List[Any] =self._generate_dummy_inputs_for_causal_lm(
__lowercase , batch_size=__lowercase , seq_length=__lowercase , is_pair=__lowercase , framework=__lowercase )
return common_inputs
def __lowercase ( self :Optional[int] , __lowercase :Tuple , __lowercase :Tuple , __lowercase :Dict , __lowercase :List[str] ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCamelCase : str =super()._flatten_past_key_values_(__lowercase , __lowercase , __lowercase , __lowercase )
else:
__lowerCamelCase : Optional[Any] =super(__lowercase , self )._flatten_past_key_values_(
__lowercase , __lowercase , __lowercase , __lowercase )
@property
def __lowercase ( self :List[str] ):
return 1e-4
| 179 | 0 |
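For reference, the past-key-values loop in the fragment above allocates per-layer dummy tensors for ONNX export with cache, but the perturbed text zeroes all four slots with the same placeholder argument. A minimal runnable sketch of the intended shape logic follows; the function name, toy sizes, and the key/value ordering inside each tuple are illustrative assumptions:

import torch

def dummy_past_key_values(batch, enc_len, dec_len, n_layers, n_heads, hidden):
    head_dim = hidden // n_heads
    encoder_shape = (batch, n_heads, enc_len, head_dim)
    # the fragment pads the decoder length by 3 when sizing past states
    decoder_shape = (batch, n_heads, dec_len + 3, head_dim)
    return [
        (
            torch.zeros(decoder_shape),  # decoder self-attention key (assumed slot)
            torch.zeros(decoder_shape),  # decoder self-attention value
            torch.zeros(encoder_shape),  # cross-attention key
            torch.zeros(encoder_shape),  # cross-attention value
        )
        for _ in range(n_layers)
    ]

past = dummy_past_key_values(batch=2, enc_len=8, dec_len=1, n_layers=2, n_heads=4, hidden=64)
assert past[0][0].shape == (2, 4, 1 + 3, 16)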
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
UpperCAmelCase_ = logging.getLogger(__name__)
UpperCAmelCase_ = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
UpperCAmelCase_ = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={
'help': (
'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(__lowerCamelCase )} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
SCREAMING_SNAKE_CASE_ = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def __lowerCamelCase( self ):
"""simple docstring"""
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can't be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class __SCREAMING_SNAKE_CASE :
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
SCREAMING_SNAKE_CASE_ = field(default=__lowerCamelCase , metadata={'help': 'The input training data file (a text file).'} )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
SCREAMING_SNAKE_CASE_ = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
SCREAMING_SNAKE_CASE_ = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
SCREAMING_SNAKE_CASE_ = field(
default=__lowerCamelCase , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def __lowerCamelCase( self ):
"""simple docstring"""
if self.train_file is not None:
_snake_case : str = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_snake_case : Union[str, Any] = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def UpperCAmelCase ( A__ , A__ ) -> List[Any]:
with open(lowerCAmelCase_ , """r""" , encoding="""utf-8""" ) as f:
_snake_case : Tuple = [json.loads(lowerCAmelCase_ ) for line in f.read().splitlines() if (len(lowerCAmelCase_ ) > 0 and not line.isspace())]
assert len(lowerCAmelCase_ ) == len(lowerCAmelCase_ )
_snake_case : Any = {c: dataset[c] for c in dataset.column_names}
_snake_case : List[Any] = refs
return Dataset.from_dict(lowerCAmelCase_ )
def UpperCAmelCase ( ) -> Tuple:
_snake_case : int = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_snake_case : Optional[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_snake_case : str = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_snake_case : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_snake_case : Dict = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_snake_case : Optional[int] = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_snake_case : Any = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''train[:{data_args.validation_split_percentage}%]''' , )
_snake_case : Optional[int] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=f'''train[{data_args.validation_split_percentage}%:]''' , )
else:
_snake_case : Dict = {}
if data_args.train_file is not None:
_snake_case : str = data_args.train_file
if data_args.validation_file is not None:
_snake_case : Any = data_args.validation_file
_snake_case : Any = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
_snake_case : List[str] = "text"
_snake_case : Dict = load_dataset(lowerCAmelCase_ , data_files=lowerCAmelCase_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_snake_case : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_snake_case : Optional[Any] = AutoConfig.from_pretrained(model_args.config_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
_snake_case : str = AutoConfig.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
_snake_case : Optional[int] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(f'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(f'''New config: {config}''' )
_snake_case : List[str] = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_snake_case : str = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **lowerCAmelCase_ )
elif model_args.model_name_or_path:
_snake_case : Dict = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **lowerCAmelCase_ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
_snake_case : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=lowerCAmelCase_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_snake_case : Dict = AutoModelForMaskedLM.from_config(lowerCAmelCase_ )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_snake_case : Optional[Any] = datasets["train"].column_names
else:
_snake_case : Dict = datasets["validation"].column_names
_snake_case : Union[str, Any] = "text" if "text" in column_names else column_names[0]
_snake_case : Optional[Any] = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(A__ ):
# Remove empty lines
_snake_case : str = [line for line in examples["text"] if len(lowerCAmelCase_ ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=lowerCAmelCase_ , truncation=lowerCAmelCase_ , max_length=data_args.max_seq_length )
_snake_case : str = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_snake_case : List[Any] = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_snake_case : List[str] = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
# If we have ref files, we need to keep the trainer from removing them
_snake_case : Optional[Any] = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_snake_case : Union[str, Any] = False
# Data collator
# This one will take care of randomly masking the tokens.
_snake_case : Optional[Any] = DataCollatorForWholeWordMask(tokenizer=lowerCAmelCase_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_snake_case : int = Trainer(
model=lowerCAmelCase_ , args=lowerCAmelCase_ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=lowerCAmelCase_ , data_collator=lowerCAmelCase_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_snake_case : Dict = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_snake_case : Dict = model_args.model_name_or_path
else:
_snake_case : int = None
_snake_case : Optional[Any] = trainer.train(resume_from_checkpoint=lowerCAmelCase_ )
trainer.save_model() # Saves the tokenizer too for easy upload
_snake_case : Tuple = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
_snake_case : Dict = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_snake_case : Tuple = trainer.evaluate()
_snake_case : str = math.exp(eval_output["""eval_loss"""] )
_snake_case : Tuple = perplexity
_snake_case : int = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(lowerCAmelCase_ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(f''' {key} = {value}''' )
writer.write(f'''{key} = {value}\n''' )
return results
def UpperCAmelCase ( A__ ) -> Optional[int]:
main()
if __name__ == "__main__":
main()
| 707 |
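The training script above uses the standard three-dataclass entry point. Below is a minimal sketch of how HfArgumentParser maps command-line flags onto those dataclasses; the two toy dataclasses here mirror only a couple of the real fields:

from dataclasses import dataclass, field
from typing import Optional

from transformers import HfArgumentParser, TrainingArguments

@dataclass
class ModelArguments:
    model_name_or_path: Optional[str] = field(default=None)

@dataclass
class DataTrainingArguments:
    train_file: Optional[str] = field(default=None)

parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
model_args, data_args, training_args = parser.parse_args_into_dataclasses(
    args=["--model_name_or_path", "bert-base-uncased", "--output_dir", "out"]
)
print(model_args.model_name_or_path, data_args.train_file, training_args.output_dir)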
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_roberta import RobertaTokenizer
UpperCAmelCase_ = logging.get_logger(__name__)
UpperCAmelCase_ = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
UpperCAmelCase_ = {
'''vocab_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/vocab.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/vocab.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/vocab.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/vocab.json''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/vocab.json''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/vocab.json'''
),
},
'''merges_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/merges.txt''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/merges.txt''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/merges.txt''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/merges.txt''',
'''roberta-base-openai-detector''': '''https://huggingface.co/roberta-base-openai-detector/resolve/main/merges.txt''',
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/merges.txt'''
),
},
'''tokenizer_file''': {
'''roberta-base''': '''https://huggingface.co/roberta-base/resolve/main/tokenizer.json''',
'''roberta-large''': '''https://huggingface.co/roberta-large/resolve/main/tokenizer.json''',
'''roberta-large-mnli''': '''https://huggingface.co/roberta-large-mnli/resolve/main/tokenizer.json''',
'''distilroberta-base''': '''https://huggingface.co/distilroberta-base/resolve/main/tokenizer.json''',
'''roberta-base-openai-detector''': (
'''https://huggingface.co/roberta-base-openai-detector/resolve/main/tokenizer.json'''
),
'''roberta-large-openai-detector''': (
'''https://huggingface.co/roberta-large-openai-detector/resolve/main/tokenizer.json'''
),
},
}
UpperCAmelCase_ = {
'''roberta-base''': 512,
'''roberta-large''': 512,
'''roberta-large-mnli''': 512,
'''distilroberta-base''': 512,
'''roberta-base-openai-detector''': 512,
'''roberta-large-openai-detector''': 512,
}
class __SCREAMING_SNAKE_CASE ( lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE_ = ['input_ids', 'attention_mask']
SCREAMING_SNAKE_CASE_ = RobertaTokenizer
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__="replace" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="</s>" , SCREAMING_SNAKE_CASE__="<s>" , SCREAMING_SNAKE_CASE__="<unk>" , SCREAMING_SNAKE_CASE__="<pad>" , SCREAMING_SNAKE_CASE__="<mask>" , SCREAMING_SNAKE_CASE__=False , SCREAMING_SNAKE_CASE__=True , **SCREAMING_SNAKE_CASE__ , ):
"""simple docstring"""
super().__init__(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , tokenizer_file=SCREAMING_SNAKE_CASE__ , errors=SCREAMING_SNAKE_CASE__ , bos_token=SCREAMING_SNAKE_CASE__ , eos_token=SCREAMING_SNAKE_CASE__ , sep_token=SCREAMING_SNAKE_CASE__ , cls_token=SCREAMING_SNAKE_CASE__ , unk_token=SCREAMING_SNAKE_CASE__ , pad_token=SCREAMING_SNAKE_CASE__ , mask_token=SCREAMING_SNAKE_CASE__ , add_prefix_space=SCREAMING_SNAKE_CASE__ , trim_offsets=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ , )
_snake_case : Optional[Any] = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_snake_case : List[Any] = getattr(SCREAMING_SNAKE_CASE__ , pre_tok_state.pop("""type""" ) )
_snake_case : List[str] = add_prefix_space
_snake_case : Union[str, Any] = pre_tok_class(**SCREAMING_SNAKE_CASE__ )
_snake_case : Dict = add_prefix_space
_snake_case : Optional[Any] = """post_processor"""
_snake_case : Optional[Any] = getattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
if tokenizer_component_instance:
_snake_case : Optional[Any] = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
_snake_case : Optional[Any] = tuple(state["""sep"""] )
if "cls" in state:
_snake_case : int = tuple(state["""cls"""] )
_snake_case : Tuple = False
if state.get("""add_prefix_space""" , SCREAMING_SNAKE_CASE__ ) != add_prefix_space:
_snake_case : Tuple = add_prefix_space
_snake_case : Tuple = True
if state.get("""trim_offsets""" , SCREAMING_SNAKE_CASE__ ) != trim_offsets:
_snake_case : str = trim_offsets
_snake_case : List[str] = True
if changes_to_apply:
_snake_case : Tuple = getattr(SCREAMING_SNAKE_CASE__ , state.pop("""type""" ) )
_snake_case : Tuple = component_class(**SCREAMING_SNAKE_CASE__ )
setattr(self.backend_tokenizer , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
@property
def __lowerCamelCase( self ):
"""simple docstring"""
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[Any] = AddedToken(SCREAMING_SNAKE_CASE__ , lstrip=SCREAMING_SNAKE_CASE__ , rstrip=SCREAMING_SNAKE_CASE__ ) if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else value
_snake_case : Any = value
def __lowerCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : str = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , *SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ):
"""simple docstring"""
_snake_case : List[str] = kwargs.get("""is_split_into_words""" , SCREAMING_SNAKE_CASE__ )
assert self.add_prefix_space or not is_split_into_words, (
f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
"to use it with pretokenized inputs."
)
return super()._encode_plus(*SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
"""simple docstring"""
_snake_case : Any = self._tokenizer.model.save(SCREAMING_SNAKE_CASE__ , name=SCREAMING_SNAKE_CASE__ )
return tuple(SCREAMING_SNAKE_CASE__ )
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=None ):
"""simple docstring"""
_snake_case : Tuple = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def __lowerCamelCase( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = None ):
"""simple docstring"""
_snake_case : Union[str, Any] = [self.sep_token_id]
_snake_case : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
| 519 | 0 |
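A short usage sketch of the add_prefix_space behavior the encode assertions above enforce. Byte-level BPE folds the leading space into a token, so pretokenized input is only safe when the prefix space is added; this example pulls the public roberta-base checkpoint:

from transformers import RobertaTokenizerFast

tok = RobertaTokenizerFast.from_pretrained("roberta-base", add_prefix_space=True)
enc = tok(["lower", "newer"], is_split_into_words=True)
print(tok.convert_ids_to_tokens(enc["input_ids"]))  # word tokens carry the 'Ġ' space marker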
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_UpperCAmelCase : int = """platform"""
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowercase :
__SCREAMING_SNAKE_CASE : List[Any] = PegasusConfig
__SCREAMING_SNAKE_CASE : List[Any] = {}
__SCREAMING_SNAKE_CASE : List[str] = '''gelu'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=False , snake_case=99 , snake_case=32 , snake_case=5 , snake_case=4 , snake_case=37 , snake_case=0.1 , snake_case=0.1 , snake_case=20 , snake_case=2 , snake_case=1 , snake_case=0 , ):
snake_case_ = parent
snake_case_ = batch_size
snake_case_ = seq_length
snake_case_ = is_training
snake_case_ = use_labels
snake_case_ = vocab_size
snake_case_ = hidden_size
snake_case_ = num_hidden_layers
snake_case_ = num_attention_heads
snake_case_ = intermediate_size
snake_case_ = hidden_dropout_prob
snake_case_ = attention_probs_dropout_prob
snake_case_ = max_position_embeddings
snake_case_ = eos_token_id
snake_case_ = pad_token_id
snake_case_ = bos_token_id
def a ( self ):
snake_case_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
snake_case_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
snake_case_ = np.concatenate([input_ids, eos_tensor] , axis=1 )
snake_case_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
snake_case_ = prepare_pegasus_inputs_dict(snake_case , snake_case , snake_case )
return config, inputs_dict
def a ( self , snake_case , snake_case , snake_case ):
snake_case_ = 20
snake_case_ = model_class_name(snake_case )
snake_case_ = model.encode(inputs_dict['input_ids'] )
snake_case_ , snake_case_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case_ = model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case )
snake_case_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
snake_case_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ = model.decode(
decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , )
snake_case_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case_ = model.decode(
decoder_input_ids[:, -1:] , snake_case , decoder_attention_mask=snake_case , past_key_values=outputs_cache.past_key_values , decoder_position_ids=snake_case , )
snake_case_ = model.decode(snake_case , snake_case )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def a ( self , snake_case , snake_case , snake_case ):
snake_case_ = 20
snake_case_ = model_class_name(snake_case )
snake_case_ = model.encode(inputs_dict['input_ids'] )
snake_case_ , snake_case_ = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
snake_case_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
snake_case_ = model.init_cache(decoder_input_ids.shape[0] , snake_case , snake_case )
snake_case_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
snake_case_ = model.decode(
decoder_input_ids[:, :-1] , snake_case , decoder_attention_mask=snake_case , past_key_values=snake_case , decoder_position_ids=snake_case , )
snake_case_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
snake_case_ = model.decode(
decoder_input_ids[:, -1:] , snake_case , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=snake_case , decoder_position_ids=snake_case , )
snake_case_ = model.decode(snake_case , snake_case , decoder_attention_mask=snake_case )
snake_case_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=F'''Max diff is {diff}''' )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=None , UpperCamelCase__=None , ):
'''simple docstring'''
if attention_mask is None:
snake_case_ = np.not_equal(UpperCamelCase__ , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
snake_case_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowercase ( lowercase_ , unittest.TestCase ):
__SCREAMING_SNAKE_CASE : str = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
__SCREAMING_SNAKE_CASE : Optional[Any] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
__SCREAMING_SNAKE_CASE : Optional[Any] = True
__SCREAMING_SNAKE_CASE : List[str] = False
__SCREAMING_SNAKE_CASE : str = False
__SCREAMING_SNAKE_CASE : int = False
def a ( self ):
snake_case_ = FlaxPegasusModelTester(self )
snake_case_ = ConfigTester(self , config_class=snake_case )
def a ( self ):
self.config_tester.run_common_tests()
def a ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(snake_case , snake_case , snake_case )
def a ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(snake_case , snake_case , snake_case )
def a ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = self._prepare_for_class(snake_case , snake_case )
snake_case_ = model_class(snake_case )
@jax.jit
def encode_jitted(snake_case , snake_case=None , **snake_case ):
return model.encode(input_ids=snake_case , attention_mask=snake_case )
with self.subTest('JIT Enabled' ):
snake_case_ = encode_jitted(**snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case_ = encode_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
def a ( self ):
snake_case_ , snake_case_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
snake_case_ = model_class(snake_case )
snake_case_ = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
snake_case_ = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(snake_case , snake_case , snake_case ):
return model.decode(
decoder_input_ids=snake_case , decoder_attention_mask=snake_case , encoder_outputs=snake_case , )
with self.subTest('JIT Enabled' ):
snake_case_ = decode_jitted(**snake_case ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
snake_case_ = decode_jitted(**snake_case ).to_tuple()
self.assertEqual(len(snake_case ) , len(snake_case ) )
for jitted_output, output in zip(snake_case , snake_case ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def a ( self ):
for model_class_name in self.all_model_classes:
snake_case_ = model_class_name.from_pretrained('google/pegasus-large' , from_pt=snake_case )
snake_case_ = np.ones((1, 1) )
snake_case_ = model(snake_case )
self.assertIsNotNone(snake_case )
@slow
def a ( self ):
snake_case_ = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
snake_case_ = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
snake_case_ = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
snake_case_ = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
snake_case_ = tokenizer(snake_case , return_tensors='np' , truncation=snake_case , max_length=512 , padding=snake_case )
snake_case_ = model.generate(**snake_case , num_beams=2 ).sequences
snake_case_ = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
assert tgt_text == decoded
| 362 |
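The prepare_pegasus_inputs_dict helper above derives both attention masks from the token ids. A distilled, runnable version follows (the pad id is illustrative, and np.int8 stands in for the fragment's style-perturbed np.inta):

import numpy as np

PAD = 0

def make_masks(input_ids, decoder_input_ids):
    # encoder mask: hide pad tokens
    attention_mask = np.not_equal(input_ids, PAD).astype(np.int8)
    # decoder mask: always keep the first (decoder start) token visible
    decoder_attention_mask = np.concatenate(
        [
            np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
            np.not_equal(decoder_input_ids[:, 1:], PAD).astype(np.int8),
        ],
        axis=-1,
    )
    return attention_mask, decoder_attention_mask

m, dm = make_masks(np.array([[5, 6, 0]]), np.array([[0, 7, 0]]))
assert m.tolist() == [[1, 1, 0]] and dm.tolist() == [[1, 1, 0]]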
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return 1 / (1 + np.exp(-z ))
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
return (-y * np.log(UpperCamelCase__ ) - (1 - y) * np.log(1 - h )).mean()
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
'''simple docstring'''
snake_case_ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
return np.sum(y * scores - np.log(1 + np.exp(UpperCamelCase__ ) ) )
def __lowerCamelCase ( UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=70000 ):
'''simple docstring'''
snake_case_ = np.zeros(x.shape[1] )
for iterations in range(UpperCamelCase__ ):
snake_case_ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
snake_case_ = sigmoid_function(UpperCamelCase__ )
snake_case_ = np.dot(x.T , h - y ) / y.size
snake_case_ = theta - alpha * gradient # updating the weights
snake_case_ = np.dot(UpperCamelCase__ , UpperCamelCase__ )
snake_case_ = sigmoid_function(UpperCamelCase__ )
snake_case_ = cost_function(UpperCamelCase__ , UpperCamelCase__ )
if iterations % 100 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
_UpperCAmelCase : str = datasets.load_iris()
_UpperCAmelCase : Union[str, Any] = iris.data[:, :2]
_UpperCAmelCase : List[str] = (iris.target != 0) * 1
_UpperCAmelCase : Optional[int] = 0.1
_UpperCAmelCase : Dict = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def __lowerCamelCase ( UpperCamelCase__ ):
'''simple docstring'''
return sigmoid_function(
np.dot(UpperCamelCase__ , UpperCamelCase__ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((_UpperCAmelCase) , (_UpperCAmelCase)) : Any = (x[:, 0].min(), x[:, 0].max())
((_UpperCAmelCase) , (_UpperCAmelCase)) : Union[str, Any] = (x[:, 1].min(), x[:, 1].max())
((_UpperCAmelCase) , (_UpperCAmelCase)) : int = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
_UpperCAmelCase : List[Any] = np.c_[xxa.ravel(), xxa.ravel()]
_UpperCAmelCase : Dict = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show()
| 362 | 1 |
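As a compact cross-check of the training loop above, here is a clean reference implementation on synthetic data; the fragment's identifiers are style-perturbed (its cost function, for instance, ends up with one placeholder name standing in for both arguments), so this sketch restates the intended math:

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def cost(h, y, eps=1e-12):
    h = np.clip(h, eps, 1 - eps)  # guard the logs near 0 and 1
    # binary cross-entropy: -mean(y*log(h) + (1 - y)*log(1 - h))
    return (-y * np.log(h) - (1 - y) * np.log(1 - h)).mean()

rng = np.random.default_rng(0)
x = rng.normal(size=(100, 2))
y = (x[:, 0] + x[:, 1] > 0).astype(float)
theta = np.zeros(2)
for _ in range(500):
    h = sigmoid(x @ theta)
    theta -= 0.1 * (x.T @ (h - y)) / y.size  # gradient step on the weights
print("final loss:", cost(sigmoid(x @ theta), y))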
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tensorflow_text_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowerCAmelCase = {
"""configuration_bert""": ["""BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BertConfig""", """BertOnnxConfig"""],
"""tokenization_bert""": ["""BasicTokenizer""", """BertTokenizer""", """WordpieceTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""BertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BertForMaskedLM""",
"""BertForMultipleChoice""",
"""BertForNextSentencePrediction""",
"""BertForPreTraining""",
"""BertForQuestionAnswering""",
"""BertForSequenceClassification""",
"""BertForTokenClassification""",
"""BertLayer""",
"""BertLMHeadModel""",
"""BertModel""",
"""BertPreTrainedModel""",
"""load_tf_weights_in_bert""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFBertEmbeddings""",
"""TFBertForMaskedLM""",
"""TFBertForMultipleChoice""",
"""TFBertForNextSentencePrediction""",
"""TFBertForPreTraining""",
"""TFBertForQuestionAnswering""",
"""TFBertForSequenceClassification""",
"""TFBertForTokenClassification""",
"""TFBertLMHeadModel""",
"""TFBertMainLayer""",
"""TFBertModel""",
"""TFBertPreTrainedModel""",
]
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = ["""TFBertTokenizer"""]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCAmelCase = [
"""FlaxBertForCausalLM""",
"""FlaxBertForMaskedLM""",
"""FlaxBertForMultipleChoice""",
"""FlaxBertForNextSentencePrediction""",
"""FlaxBertForPreTraining""",
"""FlaxBertForQuestionAnswering""",
"""FlaxBertForSequenceClassification""",
"""FlaxBertForTokenClassification""",
"""FlaxBertModel""",
"""FlaxBertPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_bert import BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, BertConfig, BertOnnxConfig
from .tokenization_bert import BasicTokenizer, BertTokenizer, WordpieceTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_fast import BertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bert import (
BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
BertForMaskedLM,
BertForMultipleChoice,
BertForNextSentencePrediction,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertForTokenClassification,
BertLayer,
BertLMHeadModel,
BertModel,
BertPreTrainedModel,
load_tf_weights_in_bert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_bert import (
TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFBertEmbeddings,
TFBertForMaskedLM,
TFBertForMultipleChoice,
TFBertForNextSentencePrediction,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertForTokenClassification,
TFBertLMHeadModel,
TFBertMainLayer,
TFBertModel,
TFBertPreTrainedModel,
)
try:
if not is_tensorflow_text_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bert_tf import TFBertTokenizer
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_bert import (
FlaxBertForCausalLM,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
FlaxBertPreTrainedModel,
)
else:
import sys
_lowerCAmelCase = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
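A minimal sketch of the _LazyModule idea used above: names listed in the import structure resolve on first attribute access through a module-level __getattr__ (PEP 562), so importing the package stays cheap. This is an illustrative reimplementation, not the transformers internals:

import importlib
import types

def lazy_module(pkg_name, import_structure):
    symbol_to_submodule = {
        symbol: submodule
        for submodule, symbols in import_structure.items()
        for symbol in symbols
    }
    module = types.ModuleType(pkg_name)

    def __getattr__(symbol):
        if symbol not in symbol_to_submodule:
            raise AttributeError(symbol)
        real = importlib.import_module(f"{pkg_name}.{symbol_to_submodule[symbol]}")
        value = getattr(real, symbol)
        setattr(module, symbol, value)  # cache so later lookups skip __getattr__
        return value

    module.__getattr__ = __getattr__
    return module

# e.g. lazy_module("mypkg", {"models": ["MyModel"]}) imports mypkg.models
# only when MyModel is first touched.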
| 716 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
_UpperCamelCase = tempfile.mkdtemp()
# fmt: off
_UpperCamelCase = ['''[UNK]''', '''[CLS]''', '''[SEP]''', '''[PAD]''', '''[MASK]''', '''want''', '''##want''', '''##ed''', '''wa''', '''un''', '''runn''', '''##ing''', ''',''', '''low''', '''lowest''']
# fmt: on
_UpperCamelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as vocab_writer:
vocab_writer.write(''''''.join([x + '''\n''' for x in vocab_tokens] ) )
_UpperCamelCase = {
'''do_resize''': True,
'''size''': {'''height''': 18, '''width''': 18},
'''do_normalize''': True,
'''image_mean''': [0.5, 0.5, 0.5],
'''image_std''': [0.5, 0.5, 0.5],
}
_UpperCamelCase = os.path.join(self.tmpdirname , _A )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(_A , _A )
def UpperCamelCase_ ( self : Tuple , **_A : Optional[Any] ):
return BertTokenizer.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : List[Any] , **_A : Union[str, Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **_A )
def UpperCamelCase_ ( self : int ):
shutil.rmtree(self.tmpdirname )
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
_UpperCamelCase = [Image.fromarray(np.moveaxis(_A , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def UpperCamelCase_ ( self : Optional[int] ):
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Optional[Any] ):
_UpperCamelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCamelCase = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
_UpperCamelCase = self.get_image_processor(do_normalize=_A , padding_value=1.0 )
_UpperCamelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=_A , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _A )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = image_processor(_A , return_tensors='''np''' )
_UpperCamelCase = processor(images=_A , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def UpperCamelCase_ ( self : Dict ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = processor(text=_A )
_UpperCamelCase = tokenizer(_A )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def UpperCamelCase_ ( self : Union[str, Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''token_type_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with self.assertRaises(_A ):
processor()
def UpperCamelCase_ ( self : List[Any] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCamelCase = processor.batch_decode(_A )
_UpperCamelCase = tokenizer.batch_decode(_A )
self.assertListEqual(_A , _A )
def UpperCamelCase_ ( self : List[str] ):
_UpperCamelCase = self.get_image_processor()
_UpperCamelCase = self.get_tokenizer()
_UpperCamelCase = VisionTextDualEncoderProcessor(tokenizer=_A , image_processor=_A )
_UpperCamelCase = '''lower newer'''
_UpperCamelCase = self.prepare_image_inputs()
_UpperCamelCase = processor(text=_A , images=_A )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 71 | 0 |
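A distilled, offline version of the save/load round-trip those tests exercise; the tiny vocabulary and the size kwarg shape follow the fixtures in the test class:

import os
import tempfile

from transformers import BertTokenizer, ViTImageProcessor, VisionTextDualEncoderProcessor

with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("\n".join(["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "lower", "newer"]))
    processor = VisionTextDualEncoderProcessor(
        tokenizer=BertTokenizer(vocab_file),
        image_processor=ViTImageProcessor(size={"height": 18, "width": 18}),
    )
    processor.save_pretrained(tmp)
    reloaded = VisionTextDualEncoderProcessor.from_pretrained(tmp)
    assert reloaded.tokenizer.get_vocab() == processor.tokenizer.get_vocab()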
"""simple docstring"""
class _lowerCamelCase :
def __init__( self : str ) -> List[Any]:
"""simple docstring"""
lowerCAmelCase__ : List[str] = """"""
lowerCAmelCase__ : Optional[Any] = """"""
lowerCAmelCase__ : Optional[Any] = []
def _lowerCAmelCase ( self : List[Any] , UpperCamelCase : int , UpperCamelCase : int ) -> int:
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowerCAmelCase__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowerCAmelCase__ : Optional[Any] = self.__min_dist_top_down_dp(UpperCamelCase , n - 1 )
lowerCAmelCase__ : List[Any] = self.__min_dist_top_down_dp(m - 1 , UpperCamelCase )
lowerCAmelCase__ : Optional[Any] = self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowerCAmelCase__ : Tuple = 1 + min(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self.dp[m][n]
def _lowerCAmelCase ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : str ) -> int:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = worda
lowerCAmelCase__ : Optional[Any] = worda
lowerCAmelCase__ : Tuple = [[-1 for _ in range(len(UpperCamelCase ) )] for _ in range(len(UpperCamelCase ) )]
return self.__min_dist_top_down_dp(len(UpperCamelCase ) - 1 , len(UpperCamelCase ) - 1 )
def _lowerCAmelCase ( self : int , UpperCamelCase : str , UpperCamelCase : str ) -> int:
"""simple docstring"""
lowerCAmelCase__ : str = worda
lowerCAmelCase__ : Optional[Any] = worda
lowerCAmelCase__ : Optional[Any] = len(UpperCamelCase )
lowerCAmelCase__ : List[Any] = len(UpperCamelCase )
lowerCAmelCase__ : List[str] = [[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowerCAmelCase__ : Optional[Any] = j
elif j == 0: # second string is empty
lowerCAmelCase__ : List[Any] = i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowerCAmelCase__ : List[str] = self.dp[i - 1][j - 1]
else:
lowerCAmelCase__ : Tuple = self.dp[i][j - 1]
lowerCAmelCase__ : int = self.dp[i - 1][j]
lowerCAmelCase__ : int = self.dp[i - 1][j - 1]
lowerCAmelCase__ : str = 1 + min(UpperCamelCase , UpperCamelCase , UpperCamelCase )
return self.dp[m][n]
if __name__ == "__main__":
_A = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
_A = input("""Enter the first string: """).strip()
_A = input("""Enter the second string: """).strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""")
| 299 |
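A self-contained reference for the bottom-up recurrence above, with the classic check that turning "intention" into "execution" takes 5 single-character edits. Both methods in the class run in O(m*n) time; note the class's method names are style-perturbed even though its driver block calls min_dist_top_down and min_dist_bottom_up:

def edit_distance(a: str, b: str) -> int:
    m, n = len(a), len(b)
    dp = [[0] * (n + 1) for _ in range(m + 1)]
    for i in range(m + 1):
        for j in range(n + 1):
            if i == 0:
                dp[i][j] = j  # insert all of b
            elif j == 0:
                dp[i][j] = i  # delete all of a
            elif a[i - 1] == b[j - 1]:
                dp[i][j] = dp[i - 1][j - 1]
            else:
                dp[i][j] = 1 + min(
                    dp[i][j - 1],      # insert
                    dp[i - 1][j],      # delete
                    dp[i - 1][j - 1],  # replace
                )
    return dp[m][n]

assert edit_distance("intention", "execution") == 5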
"""simple docstring"""
import unittest
from transformers.utils.backbone_utils import (
BackboneMixin,
get_aligned_output_features_output_indices,
verify_out_features_out_indices,
)
class _lowerCamelCase ( unittest.TestCase ):
def _lowerCAmelCase ( self : Optional[Any] ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = ["""a""", """b""", """c"""]
# Defaults to last layer if both are None
lowerCAmelCase__ , lowerCAmelCase__ : str = get_aligned_output_features_output_indices(UpperCamelCase , UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""c"""] )
self.assertEqual(UpperCamelCase , [2] )
# Out indices set to match out features
lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(["""a""", """c"""] , UpperCamelCase , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [0, 2] )
# Out features set to match out indices
lowerCAmelCase__ , lowerCAmelCase__ : int = get_aligned_output_features_output_indices(UpperCamelCase , [0, 2] , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [0, 2] )
# Out features selected from negative indices
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = get_aligned_output_features_output_indices(UpperCamelCase , [-3, -1] , UpperCamelCase )
self.assertEqual(UpperCamelCase , ["""a""", """c"""] )
self.assertEqual(UpperCamelCase , [-3, -1] )
def _lowerCAmelCase ( self : List[str] ) -> Tuple:
"""simple docstring"""
# Stage names must be set
with self.assertRaises(ValueError):
    verify_out_features_out_indices(["""a""", """b"""], (0, 1), None)
# Out features must be a list
with self.assertRaises(ValueError):
    verify_out_features_out_indices(("""a""", """b"""), (0, 1), ["""a""", """b"""])
# Out features must be a subset of stage names
with self.assertRaises(ValueError):
    verify_out_features_out_indices(["""a""", """b"""], (0, 1), ["""a"""])
# Out indices must be a list or tuple
with self.assertRaises(ValueError):
    verify_out_features_out_indices(None, 0, ["""a""", """b"""])
# Out indices must be a subset of stage names
with self.assertRaises(ValueError):
    verify_out_features_out_indices(None, (0, 1), ["""a"""])
# Out features and out indices must be the same length
with self.assertRaises(ValueError):
    verify_out_features_out_indices(["""a""", """b"""], (0,), ["""a""", """b""", """c"""])
# Out features should match out indices
with self.assertRaises(ValueError):
    verify_out_features_out_indices(["""a""", """b"""], (0, 2), ["""a""", """b""", """c"""])
# Out features and out indices should be in order
with self.assertRaises(ValueError):
    verify_out_features_out_indices(["""b""", """a"""], (0, 1), ["""a""", """b"""])
# Check passes with valid inputs
verify_out_features_out_indices(["""a""", """b""", """d"""] , (0, 1, -1) , ["""a""", """b""", """c""", """d"""] )
def _lowerCAmelCase ( self : Dict ) -> Optional[int]:
"""simple docstring"""
backbone = BackboneMixin()
backbone.stage_names = ["""a""", """b""", """c"""]
backbone._out_features = ["""a""", """c"""]
backbone._out_indices = [0, 2]
# Check that the output features and indices are set correctly
self.assertEqual(backbone.out_features, ["""a""", """c"""])
self.assertEqual(backbone.out_indices, [0, 2])
# Check out features and indices are updated correctly
backbone.out_features = ["""a""", """b"""]
self.assertEqual(backbone.out_features, ["""a""", """b"""])
self.assertEqual(backbone.out_indices, [0, 1])
backbone.out_indices = [-3, -1]
self.assertEqual(backbone.out_features, ["""a""", """c"""])
self.assertEqual(backbone.out_indices, [-3, -1])
| 299 | 1 |
'''simple docstring'''
import math
import os
import re
import sys
import unittest
from pathlib import Path
from typing import Tuple
from unittest.mock import patch
from parameterized import parameterized
from transformers.testing_utils import (
CaptureStderr,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
get_torch_dist_unique_port,
require_apex,
require_bitsandbytes,
require_fairscale,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
require_torch_non_multi_gpu,
slow,
)
from transformers.trainer_callback import TrainerState
from transformers.trainer_utils import set_seed
bindir = os.path.abspath(os.path.dirname(__file__))
with ExtendSysPath(F"{bindir}/../../examples/pytorch/translation"):
from run_translation import main # noqa
set_seed(42)
MARIAN_MODEL = '''sshleifer/student_marian_en_ro_6_1'''
MBART_TINY = '''sshleifer/tiny-mbart'''
@require_torch
class TestTrainerExt(TestCasePlus):
    def run_seqaseq_quick(self, distributed=False, extra_args_str=None, predict_with_generate=True, do_train=True, do_eval=True, do_predict=True, ):
        '''simple docstring'''
        output_dir = self.run_trainer(
            eval_steps=1, max_len=1_2, model_name=MARIAN_MODEL, num_train_epochs=1, distributed=distributed, extra_args_str=extra_args_str, predict_with_generate=predict_with_generate, do_train=do_train, do_eval=do_eval, do_predict=do_predict, )
        logs = TrainerState.load_from_json(os.path.join(output_dir, """trainer_state.json""")).log_history
        if not do_eval:
            return
        eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
        first_step_stats = eval_metrics[0]
        if predict_with_generate:
            assert "eval_bleu" in first_step_stats
            last_step_stats = eval_metrics[-1]
            assert isinstance(last_step_stats["""eval_bleu"""], float)
            assert not math.isnan(float(last_step_stats["""eval_loss"""])), "eval_loss must not be `nan`"
@require_torch_non_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick()
@require_torch_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase )
@require_torch_multi_gpu
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : List[str] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp simple --fp16""" )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2""" , predict_with_generate=__UpperCamelCase )
@unittest.skip("""Requires an update of the env running those tests""" )
@require_torch_multi_gpu
@require_fairscale
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
self.run_seqaseq_quick(
distributed=__UpperCamelCase , extra_args_str="""--sharded_ddp zero_dp_2 --fp16""" , predict_with_generate=__UpperCamelCase )
@require_apex
@require_torch_gpu
def __lowerCAmelCase( self : Optional[int] ):
'''simple docstring'''
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
# test 2nd time - was getting eval_loss': nan'
# to reproduce the problem set distributed=False
self.run_seqaseq_quick(distributed=__UpperCamelCase , extra_args_str="""--fp16 --fp16_backend=apex""" )
@parameterized.expand(["""base""", """low""", """high""", """mixed"""] )
@require_torch_multi_gpu
    def test_trainer_log_level_replica(self, experiment_id: str):
'''simple docstring'''
        experiments = {
# test with the default log_level - should be info and thus log info once
"""base""": {"""extra_args_str""": """""", """n_matches""": 1},
# test with low log_level and log_level_replica - should be noisy on all processes
# now the info string should appear twice on 2 processes
"""low""": {"""extra_args_str""": """--log_level debug --log_level_replica debug""", """n_matches""": 2},
# test with high log_level and low log_level_replica
# now the info string should appear once only on the replica
"""high""": {"""extra_args_str""": """--log_level error --log_level_replica debug""", """n_matches""": 1},
# test with high log_level and log_level_replica - should be quiet on all processes
"""mixed""": {"""extra_args_str""": """--log_level error --log_level_replica error""", """n_matches""": 0},
}
        data = experiments[experiment_id]
        kwargs = {"""distributed""": True, """predict_with_generate""": False, """do_eval""": False, """do_predict""": False}
        log_info_string = """Running training"""
        with CaptureStderr() as cl:
            self.run_seqaseq_quick(**kwargs, extra_args_str=data["""extra_args_str"""])
        n_matches = len(re.findall(log_info_string, cl.err))
        self.assertEqual(n_matches, data["""n_matches"""])
@slow
def __lowerCAmelCase( self : Optional[Any] ):
'''simple docstring'''
        output_dir = self.run_trainer(
            eval_steps=2, max_len=1_2_8, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=1_0, distributed=False, )
        # Check metrics
        logs = TrainerState.load_from_json(os.path.join(output_dir, """trainer_state.json""")).log_history
        eval_metrics = [log for log in logs if """eval_loss""" in log.keys()]
        first_step_stats = eval_metrics[0]
        last_step_stats = eval_metrics[-1]
        assert first_step_stats["eval_loss"] > last_step_stats["eval_loss"], "model learned nothing"
        assert isinstance(last_step_stats["""eval_bleu"""], float)
        # test if do_predict saves generations and metrics
        contents = os.listdir(output_dir)
        contents = {os.path.basename(p) for p in contents}
        assert "generated_predictions.txt" in contents
        assert "predict_results.json" in contents
@slow
@require_bitsandbytes
def __lowerCAmelCase( self : Union[str, Any] ):
'''simple docstring'''
from transformers.training_args import OptimizerNames
        def train_and_return_metrics(optim: str) -> Tuple[int, float]:
            extra_args = """--skip_memory_metrics 0"""
            output_dir = self.run_trainer(
                max_len=1_2_8, model_name=MARIAN_MODEL, learning_rate=3E-4, num_train_epochs=1, optim=optim, distributed=True, extra_args_str=extra_args, do_eval=False, do_predict=False, n_gpus_to_use=1, )
            # Check metrics
            logs = TrainerState.load_from_json(Path(output_dir, """trainer_state.json""")).log_history
            gpu_peak_mem_mb = int(logs[0]["""train_mem_gpu_peaked_delta"""] / 2**2_0)
            gpu_alloc_mem_mb = int(logs[0]["""train_mem_gpu_alloc_delta"""] / 2**2_0)
            loss = logs[0]["""train_loss"""]
            return gpu_peak_mem_mb, gpu_alloc_mem_mb, loss

        gpu_peak_mem_orig, gpu_alloc_mem_orig, loss_orig = train_and_return_metrics(OptimizerNames.ADAMW_TORCH.value)
        gpu_peak_mem_bnb, gpu_alloc_mem_bnb, loss_bnb = train_and_return_metrics(OptimizerNames.ADAMW_BNB.value)
        gpu_alloc_mem_diff = gpu_alloc_mem_orig - gpu_alloc_mem_bnb
        gpu_total_mem_orig = gpu_peak_mem_orig + gpu_alloc_mem_orig
        gpu_total_mem_bnb = gpu_peak_mem_bnb + gpu_alloc_mem_bnb
        gpu_total_mem_diff = gpu_total_mem_orig - gpu_total_mem_bnb
# sshleifer/student_marian_en_ro_6_1 has 54M parameter, 29M of which is `nn.Embedding` which
# doesn't get quantized and remains in fp32. Therefore we only have 25M parameters quantized
# in 2 bytes and the diff in optim memory usage is derived as so:
#
# - normal 25*8=~200MB (8 bytes per param)
# - bnb 25*2= ~50MB (2 bytes per param)
#
# Thus we should expect ~150MB total memory saved.
#
# Peak memory should be the same - the total should be different by about that same margin
#
# After leaving a small margin to accommodate for differences between gpus let's check
# that we have at least 120MB in savings
        expected_savings = 1_2_0
# uncomment the following if this test starts failing - requires py38 for a new print feature
# gpu_peak_mem_diff = gpu_peak_mem_orig - gpu_peak_mem_bnb
# print(f"{gpu_alloc_mem_orig=}MB {gpu_peak_mem_orig=}MB {gpu_alloc_mem_orig+gpu_peak_mem_orig=}MB")
# print(f" {gpu_alloc_mem_bnb=}MB {gpu_peak_mem_bnb=}MB {gpu_alloc_mem_bnb+gpu_peak_mem_bnb=}MB")
# print(f"{gpu_alloc_mem_diff=}MB")
# print(f"{gpu_peak_mem_diff=}MB")
# print(f"{gpu_total_mem_orig=}MB, {gpu_total_mem_bnb=}MB")
# print(f"{gpu_total_mem_diff=}MB, {gpu_total_mem_diff=}MB")
        self.assertGreater(
            gpu_alloc_mem_diff, expected_savings, """should use ~150MB less alloc gpu memory with BNB, compared to without it for this model but got"""
            f""" a difference of {gpu_alloc_mem_diff}MB, with gpu_alloc_mem_orig={gpu_alloc_mem_orig}MB and"""
            f""" gpu_alloc_mem_bnb={gpu_alloc_mem_bnb}MB""", )
        self.assertGreater(
            gpu_total_mem_diff, expected_savings, """should use ~150MB less total gpu memory with BNB, compared to without it for this model but got"""
            f""" a difference of {gpu_total_mem_diff}MB, with gpu_total_mem_orig={gpu_total_mem_orig}MB and"""
            f""" gpu_total_mem_bnb={gpu_total_mem_bnb}MB""", )
        self.assertEqual(
            loss_orig, loss_bnb, f"""loss should be the same, but got loss_orig={loss_orig}, loss_bnb={loss_bnb}""")
    def run_trainer(self, max_len: int, model_name: str, num_train_epochs: int, learning_rate: float = 3E-3, optim: str = "adafactor", distributed: bool = False, extra_args_str: str = None, eval_steps: int = 0, predict_with_generate: bool = True, do_train: bool = True, do_eval: bool = True, do_predict: bool = True, n_gpus_to_use: int = None, ):
'''simple docstring'''
snake_case__ = self.test_file_dir / """../fixtures/tests_samples/wmt_en_ro"""
snake_case__ = self.get_auto_remove_tmp_dir()
snake_case__ = f"""
--model_name_or_path {model_name}
--train_file {data_dir}/train.json
--validation_file {data_dir}/val.json
--test_file {data_dir}/test.json
--output_dir {output_dir}
--overwrite_output_dir
--max_train_samples 8
--max_source_length {max_len}
--max_target_length {max_len}
--do_train
--num_train_epochs {str(__UpperCamelCase )}
--per_device_train_batch_size 4
--learning_rate {learning_rate}
--warmup_steps 8
--logging_steps 0
--logging_strategy no
--save_steps {str(__UpperCamelCase )}
--group_by_length
--label_smoothing_factor 0.1
--target_lang ro_RO
--source_lang en_XX
""".split()
snake_case__ = f"""
--do_eval
--per_device_eval_batch_size 4
--max_eval_samples 8
--val_max_target_length {max_len}
--evaluation_strategy steps
--eval_steps {str(__UpperCamelCase )}
""".split()
snake_case__ = """
--do_predict
""".split()
        args = []
if do_train:
args += args_train
if do_eval:
args += args_eval
if do_predict:
args += args_predict
if predict_with_generate:
args += "--predict_with_generate".split()
if do_train:
if optim == "adafactor":
args += "--adafactor".split()
else:
args += f"""--optim {optim}""".split()
if extra_args_str is not None:
args += extra_args_str.split()
if distributed:
            if n_gpus_to_use is None:
                n_gpus_to_use = get_gpu_count()
            master_port = get_torch_dist_unique_port()
            distributed_args = f"""
-m torch.distributed.run
--nproc_per_node={n_gpus_to_use}
--master_port={master_port}
{self.examples_dir_str}/pytorch/translation/run_translation.py
""".split()
            cmd = [sys.executable] + distributed_args + args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
            execute_subprocess_async(cmd, env=self.get_env())
else:
snake_case__ = ["""run_translation.py"""] + args
with patch.object(__UpperCamelCase , """argv""" , __UpperCamelCase ):
main()
return output_dir | 566 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'''vocab_file''': '''spiece.model'''}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {
        '''bert_for_seq_generation''': (
            '''https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'''
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'''bert_for_seq_generation''': 512}


class BertGenerationTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens: List[int] = []
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sep_token="<::::>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sep_token=sep_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        state = self.__dict__.copy()
        state["""sp_model"""] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, """sp_model_kwargs"""):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str):
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = """"""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
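
    # Hedged worked example of the loop above (piece strings are illustrative):
    # with tokens = ["▁Hello", "▁world", "</s>"] and "</s>" registered as a
    # special token, the two leading pieces are decoded by SentencePiece while
    # "</s>" is appended verbatim, yielding "Hello world</s>".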
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, """wb""") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,) | 566 | 1 |
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''1.0.0a'''):
raise Exception('''requires fairseq >= 1.0.0a''')
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

SAMPLE_TEXT = '''Hello world! cécé herlolip'''
def convert_xlm_roberta_xl_checkpoint_to_pytorch(roberta_checkpoint_path: str, pytorch_dump_folder_path: str, classification_head: bool):
    '''simple docstring'''
    roberta = FairseqRobertaModel.from_pretrained(roberta_checkpoint_path)
    roberta.eval()  # disable dropout
    roberta_sent_encoder = roberta.model.encoder.sentence_encoder
    config = XLMRobertaConfig(
        vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings, hidden_size=roberta.cfg.model.encoder_embed_dim, num_hidden_layers=roberta.cfg.model.encoder_layers, num_attention_heads=roberta.cfg.model.encoder_attention_heads, intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim, max_position_embeddings=514, type_vocab_size=1, layer_norm_eps=1E-5, )
    if classification_head:
        config.num_labels = roberta.model.classification_heads["""mnli"""].out_proj.weight.shape[0]
    print("""Our RoBERTa config:""", config)
    model = XLMRobertaXLForSequenceClassification(config) if classification_head else XLMRobertaXLForMaskedLM(config)
    model.eval()
# Now let's copy all the weights.
# Embeddings
snake_case : int = roberta_sent_encoder.embed_tokens.weight
snake_case : Union[str, Any] = roberta_sent_encoder.embed_positions.weight
snake_case : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
snake_case : int = roberta_sent_encoder.layer_norm.weight
snake_case : List[Any] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
snake_case : BertLayer = model.roberta.encoder.layer[i]
snake_case : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
snake_case : RobertaAttention = layer.attention
snake_case : str = roberta_layer.self_attn_layer_norm.weight
snake_case : Union[str, Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
snake_case : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
snake_case : Optional[Any] = roberta_layer.self_attn.q_proj.weight
snake_case : str = roberta_layer.self_attn.q_proj.bias
snake_case : Optional[int] = roberta_layer.self_attn.k_proj.weight
snake_case : Optional[int] = roberta_layer.self_attn.k_proj.bias
snake_case : int = roberta_layer.self_attn.v_proj.weight
snake_case : Union[str, Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
snake_case : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
snake_case : Any = roberta_layer.self_attn.out_proj.weight
snake_case : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
snake_case : Optional[Any] = roberta_layer.final_layer_norm.weight
snake_case : Any = roberta_layer.final_layer_norm.bias
# intermediate
snake_case : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fc1.weight.shape
intermediate.dense.weight = roberta_layer.fc1.weight
intermediate.dense.bias = roberta_layer.fc1.bias
# output
snake_case : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fc2.weight.shape
bert_output.dense.weight = roberta_layer.fc2.weight
bert_output.dense.bias = roberta_layer.fc2.bias
# end of layer
if classification_head:
snake_case : Optional[Any] = roberta.model.classification_heads["""mnli"""].dense.weight
snake_case : str = roberta.model.classification_heads["""mnli"""].dense.bias
snake_case : str = roberta.model.classification_heads["""mnli"""].out_proj.weight
snake_case : List[str] = roberta.model.classification_heads["""mnli"""].out_proj.bias
else:
# LM Head
snake_case : Tuple = roberta.model.encoder.lm_head.dense.weight
snake_case : int = roberta.model.encoder.lm_head.dense.bias
snake_case : Any = roberta.model.encoder.lm_head.layer_norm.weight
snake_case : Union[str, Any] = roberta.model.encoder.lm_head.layer_norm.bias
snake_case : Dict = roberta.model.encoder.lm_head.weight
snake_case : List[Any] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
    input_ids: torch.Tensor = roberta.encode(SAMPLE_TEXT).unsqueeze(0)  # batch of size 1
    our_output = model(input_ids)[0]
    if classification_head:
        their_output = roberta.model.classification_heads["""mnli"""](roberta.extract_features(input_ids))
    else:
        their_output = roberta.model(input_ids)[0]
    print(our_output.shape, their_output.shape)
    max_absolute_diff = torch.max(torch.abs(our_output - their_output)).item()
    print(f"""max_absolute_diff = {max_absolute_diff}""")  # ~ 1e-7
    success = torch.allclose(our_output, their_output, atol=1E-3)
    print("""Do both models output the same tensors?""", """🔥""" if success else """💩""")
    if not success:
        raise Exception("""Something went wRoNg""")
    pathlib.Path(pytorch_dump_folder_path).mkdir(parents=True, exist_ok=True)
    print(f"""Saving model to {pytorch_dump_folder_path}""")
    model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowercase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--roberta_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
__lowercase : str = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
)
| 36 |
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
__snake_case = None
try:
import msvcrt
except ImportError:
__snake_case = None
try:
import fcntl
except ImportError:
__snake_case = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
    TimeoutError = OSError
# Data
# ------------------------------------------------
__all__ = [
'Timeout',
'BaseFileLock',
'WindowsFileLock',
'UnixFileLock',
'SoftFileLock',
'FileLock',
]
__version__ = '3.0.12'
_logger = None
def logger():
    global _logger
    _logger = _logger or logging.getLogger(__name__)
    return _logger


class Timeout(TimeoutError):
    """Raised when the lock could not be acquired in *timeout* seconds."""

    def __init__(self, lock_file):
        self.lock_file = lock_file
        return None

    def __str__(self):
        temp = F'''The file lock \'{self.lock_file}\' could not be acquired.'''
        return temp


class _Acquire_ReturnProxy:
    """Context-manager proxy returned by ``BaseFileLock.acquire``."""

    def __init__(self, lock):
        self.lock = lock
        return None

    def __enter__(self):
        return self.lock

    def __exit__(self, exc_type, exc_value, traceback):
        self.lock.release()
        return None
class BaseFileLock:
    """Implements the base class of a file lock."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = max_filename_length if max_filename_length is not None else 255
        # Hash the filename if it's too long
        lock_file = self.hash_filename_if_too_long(lock_file, max_filename_length)
        # The path to the lock file.
        self._lock_file = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function.
        # This file lock is only NOT None, if the object currently holds the
        # lock.
        self._lock_file_fd = None
        # The default timeout value.
        self.timeout = timeout
        # We use this lock primarily for the lock counter.
        self._thread_lock = threading.Lock()
        # The lock counter is used for implementing the nested locking
        # mechanism. Whenever the lock is acquired, the counter is increased and
        # the lock is only released, when this value is 0 again.
        self._lock_counter = 0
        return None

    @property
    def lock_file(self):
        return self._lock_file

    @property
    def timeout(self):
        return self._timeout

    @timeout.setter
    def timeout(self, timeout):
        self._timeout = float(timeout)
        return None

    def _acquire(self):
        raise NotImplementedError()

    def _release(self):
        raise NotImplementedError()

    @property
    def is_locked(self):
        return self._lock_file_fd is not None
    def acquire(self, timeout=None, poll_intervall=0.05):
        # Use the default timeout, if no timeout is provided.
        if timeout is None:
            timeout = self.timeout
        # Increment the number right at the beginning.
        # We can still undo it, if something fails.
        with self._thread_lock:
            self._lock_counter += 1
        lock_id = id(self)
        lock_filename = self._lock_file
        start_time = time.time()
        try:
            while True:
                with self._thread_lock:
                    if not self.is_locked:
                        logger().debug(F'''Attempting to acquire lock {lock_id} on {lock_filename}''')
                        self._acquire()
                if self.is_locked:
                    logger().debug(F'''Lock {lock_id} acquired on {lock_filename}''')
                    break
                elif timeout >= 0 and time.time() - start_time > timeout:
                    logger().debug(F'''Timeout on acquiring lock {lock_id} on {lock_filename}''')
                    raise Timeout(self._lock_file)
                else:
                    logger().debug(
                        F'''Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...''')
                    time.sleep(poll_intervall)
        except:  # noqa
            # Something did go wrong, so decrement the counter.
            with self._thread_lock:
                self._lock_counter = max(0, self._lock_counter - 1)
            raise
        return _Acquire_ReturnProxy(lock=self)

    def release(self, force=False):
        with self._thread_lock:
            if self.is_locked:
                self._lock_counter -= 1
                if self._lock_counter == 0 or force:
                    lock_id = id(self)
                    lock_filename = self._lock_file
                    logger().debug(F'''Attempting to release lock {lock_id} on {lock_filename}''')
                    self._release()
                    self._lock_counter = 0
                    logger().debug(F'''Lock {lock_id} released on {lock_filename}''')
        return None
def __enter__( self ) -> Dict:
self.acquire()
return self
def __exit__( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Dict:
self.release()
return None
    def __del__(self):
        self.release(force=True)
        return None

    def hash_filename_if_too_long(self, path, max_length):
        filename = os.path.basename(path)
        if len(filename) > max_length and max_length > 0:
            dirname = os.path.dirname(path)
            hashed_filename = str(hash(filename))
            new_filename = filename[: max_length - len(hashed_filename) - 8] + """...""" + hashed_filename + """.lock"""
            return os.path.join(dirname, new_filename)
        else:
            return path
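
# Hedged usage sketch of the lock API defined by BaseFileLock above (the
# lock-file name is illustrative; `FileLock`, chosen at the bottom of this
# module, is the platform-appropriate subclass). `acquire` is reentrant via
# the internal counter, so nested `with` blocks on the same object are safe:
#
#   lock = FileLock("model.bin.lock", timeout=10)
#   with lock:            # blocks for up to 10 seconds, else raises Timeout
#       ...               # critical section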
class WindowsFileLock(BaseFileLock):
    """Uses the :func:`msvcrt.locking` function to hard lock the lock file on Windows systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        from .file_utils import relative_to_absolute_path

        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)
        self._lock_file = """\\\\?\\""" + relative_to_absolute_path(self.lock_file)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            try:
                msvcrt.locking(fd, msvcrt.LK_NBLCK, 1)
            except OSError:
                os.close(fd)
            else:
                self._lock_file_fd = fd
        return None

    def _release(self):
        fd = self._lock_file_fd
        self._lock_file_fd = None
        msvcrt.locking(fd, msvcrt.LK_UNLCK, 1)
        os.close(fd)
        try:
            os.remove(self._lock_file)
        # Probably another instance of the application
        # that acquired the file lock.
        except OSError:
            pass
        return None
class UnixFileLock(BaseFileLock):
    """Uses the :func:`fcntl.flock` function to hard lock the lock file on Unix systems."""

    def __init__(self, lock_file, timeout=-1, max_filename_length=None):
        max_filename_length = os.statvfs(os.path.dirname(lock_file)).f_namemax
        super().__init__(lock_file, timeout=timeout, max_filename_length=max_filename_length)

    def _acquire(self):
        open_mode = os.O_RDWR | os.O_CREAT | os.O_TRUNC
        fd = os.open(self._lock_file, open_mode)
        try:
            fcntl.flock(fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except OSError:
            os.close(fd)
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        # Do not remove the lockfile:
        #
        # https://github.com/benediktschmitt/py-filelock/issues/31
        # https://stackoverflow.com/questions/17708885/flock-removing-locked-file-without-race-condition
        fd = self._lock_file_fd
        self._lock_file_fd = None
        fcntl.flock(fd, fcntl.LOCK_UN)
        os.close(fd)
        return None
class SoftFileLock(BaseFileLock):
    """Simply watches the existence of the lock file."""

    def _acquire(self):
        open_mode = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
        try:
            fd = os.open(self._lock_file, open_mode)
        except OSError:
            pass
        else:
            self._lock_file_fd = fd
        return None

    def _release(self):
        os.close(self._lock_file_fd)
        self._lock_file_fd = None
        try:
            os.remove(self._lock_file)
        # The file is already deleted and that's what we want.
        except OSError:
            pass
        return None
FileLock = None

if msvcrt:
    FileLock = WindowsFileLock
elif fcntl:
    FileLock = UnixFileLock
else:
    FileLock = SoftFileLock

    if warnings is not None:
        warnings.warn('only soft file lock is available') | 200 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
StableDiffusionXLImgaImgPipeline,
UNetaDConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImgaImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImgaImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'''height''', '''width'''}
    required_optional_params = PipelineTesterMixin.required_optional_params - {'''latents'''}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , attention_head_dim=(2, 4) , use_linear_projection=UpperCAmelCase__ , addition_embed_type='text_time' , addition_time_embed_dim=8 , transformer_layers_per_block=(1, 2) , projection_class_embeddings_input_dim=8_0 , cross_attention_dim=6_4 , )
lowerCAmelCase = EulerDiscreteScheduler(
beta_start=0.00_085 , beta_end=0.012 , steps_offset=1 , beta_schedule='scaled_linear' , timestep_spacing='leading' , )
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
lowerCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act='gelu' , projection_dim=3_2 , )
lowerCAmelCase = CLIPTextModel(UpperCAmelCase__ )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=UpperCAmelCase__ )
lowerCAmelCase = CLIPTextModelWithProjection(UpperCAmelCase__ )
lowerCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' , local_files_only=UpperCAmelCase__ )
lowerCAmelCase = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
"""text_encoder_2""": text_encoder_a,
"""tokenizer_2""": tokenizer_a,
# "safety_checker": None,
# "feature_extractor": None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 3_2, 3_2), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            """prompt""": """A painting of a squirrel eating a burger""",
            """image""": image,
            """generator""": generator,
            """num_inference_steps""": 2,
            """guidance_scale""": 5.0,
            """output_type""": """numpy""",
            """strength""": 0.75,
        }
        return inputs
def __UpperCAmelCase ( self : List[str] ) -> Dict:
lowerCAmelCase = """cpu""" # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = self.get_dummy_components()
lowerCAmelCase = StableDiffusionXLImgaImgPipeline(**UpperCAmelCase__ )
lowerCAmelCase = sd_pipe.to(UpperCAmelCase__ )
sd_pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
lowerCAmelCase = self.get_dummy_inputs(UpperCAmelCase__ )
lowerCAmelCase = sd_pipe(**UpperCAmelCase__ ).images
lowerCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
lowerCAmelCase = np.array([0.4_656, 0.4_840, 0.4_439, 0.6_698, 0.5_574, 0.4_524, 0.5_799, 0.5_943, 0.5_165] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
super().test_attention_slicing_forward_pass(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : Any ) -> Union[str, Any]:
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __UpperCAmelCase ( self : str ) -> Dict:
pass
def __UpperCAmelCase ( self : Dict ) -> Any:
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImgaImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)
        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        inputs["""negative_prompt"""] = negative_prompt
        inputs["""prompt"""] = 3 * [inputs["""prompt"""]]
        output = sd_pipe(**inputs)
        image_slice_a = output.images[0, -3:, -3:, -1]
        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["""this is a negative prompt"""]
        prompt = 3 * [inputs.pop('prompt')]
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)
        output = sd_pipe(
            **inputs, prompt_embeds=prompt_embeds, negative_prompt_embeds=negative_prompt_embeds, pooled_prompt_embeds=pooled_prompt_embeds, negative_pooled_prompt_embeds=negative_pooled_prompt_embeds, )
        image_slice_b = output.images[0, -3:, -3:, -1]
        # make sure that it's equal
        assert np.abs(image_slice_a.flatten() - image_slice_b.flatten()).max() < 1E-4
@slow
@require_torch_gpu
class UpperCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __UpperCAmelCase ( self : Optional[int] , UpperCAmelCase__ : Optional[int] , UpperCAmelCase__ : int="cpu" , UpperCAmelCase__ : int=torch.floataa , UpperCAmelCase__ : Tuple=0 ) -> List[str]:
lowerCAmelCase = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
lowerCAmelCase = np.random.RandomState(UpperCAmelCase__ ).standard_normal((1, 4, 6_4, 6_4) )
lowerCAmelCase = torch.from_numpy(UpperCAmelCase__ ).to(device=UpperCAmelCase__ , dtype=UpperCAmelCase__ )
lowerCAmelCase = {
"""prompt""": """a photograph of an astronaut riding a horse""",
"""latents""": latents,
"""generator""": generator,
"""num_inference_steps""": 3,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def __UpperCAmelCase ( self : Optional[int] ) -> Optional[int]:
        pipe = DiffusionPipeline.from_pretrained('stabilityai/stable-diffusion-2-base')
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 5_1_2, 5_1_2, 3)
        expected_slice = np.array([0.49_493, 0.47_896, 0.40_798, 0.54_214, 0.53_212, 0.48_202, 0.47_656, 0.46_329, 0.48_506])
        assert np.abs(image_slice - expected_slice).max() < 7E-3
| 708 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case =["""ChineseCLIPFeatureExtractor"""]
__snake_case =["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_chinese_clip"""] = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
__snake_case =_LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 513 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_UpperCAmelCase : Union[str, Any] = logging.get_logger(__name__)
_UpperCAmelCase : Union[str, Any] = {
"""microsoft/trocr-base-handwritten""": (
"""https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"""
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class TrOCRConfig(PretrainedConfig):
    model_type = '''trocr'''
    keys_to_ignore_at_inference = ['''past_key_values''']
    attribute_map = {
        '''num_attention_heads''': '''decoder_attention_heads''',
        '''hidden_size''': '''d_model''',
        '''num_hidden_layers''': '''decoder_layers''',
    }

    def __init__(self, vocab_size=5_0265, d_model=1024, decoder_layers=12, decoder_attention_heads=16, decoder_ffn_dim=4096, activation_function="gelu", max_position_embeddings=512, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, decoder_start_token_id=2, init_std=0.02, decoder_layerdrop=0.0, use_cache=True, scale_embedding=False, use_learned_position_embeddings=True, layernorm_embedding=True, pad_token_id=1, bos_token_id=0, eos_token_id=2, **kwargs, ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.activation_function = activation_function
        self.max_position_embeddings = max_position_embeddings
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.init_std = init_std
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.scale_embedding = scale_embedding
        self.use_learned_position_embeddings = use_learned_position_embeddings
        self.layernorm_embedding = layernorm_embedding
        super().__init__(
            pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, decoder_start_token_id=decoder_start_token_id, **kwargs, )
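
# Hedged usage sketch (TrOCRConfig/TrOCRForCausalLM are the public transformers
# classes this config targets; the sizes below are illustrative, not defaults):
#
#   from transformers import TrOCRConfig, TrOCRForCausalLM
#   config = TrOCRConfig(d_model=256, decoder_layers=4, decoder_attention_heads=8)
#   decoder = TrOCRForCausalLM(config)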
| 362 |
MOD_ADLER = 6_5521


def adler32(plain_text: str) -> int:
    """Compute the Adler-32 checksum of ``plain_text``."""
    a = 1
    b = 0
    for plain_chr in plain_text:
        a = (a + ord(plain_chr)) % MOD_ADLER
        b = (b + a) % MOD_ADLER
    return (b << 16) | a
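
# Quick self-check (hedged addition): 0x11E60398 is the widely cited Adler-32
# value for the ASCII string "Wikipedia".
if __name__ == "__main__":
    assert adler32("Wikipedia") == 0x11E60398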
| 362 | 1 |
import torch
import torch.nn as nn
from transformers.modeling_utils import ModuleUtilsMixin
from transformers.models.ta.modeling_ta import TaBlock, TaConfig, TaLayerNorm
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SpectrogramNotesEncoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    @register_to_config
    def __init__(self, max_length, vocab_size, d_model, dropout_rate, num_layers, num_heads, d_kv, d_ff, feed_forward_proj, is_decoder=False, ):
        super().__init__()
        self.token_embedder = nn.Embedding(vocab_size, d_model)
        self.position_encoding = nn.Embedding(max_length, d_model)
        self.position_encoding.weight.requires_grad = False
        self.dropout_pre = nn.Dropout(p=dropout_rate)
        t5config = TaConfig(
            vocab_size=vocab_size, d_model=d_model, num_heads=num_heads, d_kv=d_kv, d_ff=d_ff, dropout_rate=dropout_rate, feed_forward_proj=feed_forward_proj, is_decoder=is_decoder, is_encoder_decoder=False, )
        self.encoders = nn.ModuleList()
        for lyr_num in range(num_layers):
            lyr = TaBlock(t5config)
            self.encoders.append(lyr)
        self.layer_norm = TaLayerNorm(d_model)
        self.dropout_post = nn.Dropout(p=dropout_rate)

    def forward(self, encoder_input_tokens, encoder_inputs_mask):
        x = self.token_embedder(encoder_input_tokens)
        seq_length = encoder_input_tokens.shape[1]
        inputs_positions = torch.arange(seq_length, device=encoder_input_tokens.device)
        x += self.position_encoding(inputs_positions)
        x = self.dropout_pre(x)
        # inverted the attention mask
        input_shape = encoder_input_tokens.size()
        extended_attention_mask = self.get_extended_attention_mask(encoder_inputs_mask, input_shape)
        for lyr in self.encoders:
            x = lyr(x, extended_attention_mask)[0]
        x = self.layer_norm(x)
        return self.dropout_post(x), encoder_inputs_mask
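
# Hedged instantiation sketch (the class name above is reconstructed from the
# diffusers notes encoder this file mirrors; hyper-parameters below are
# illustrative only):
#
#   encoder = SpectrogramNotesEncoder(
#       max_length=2048, vocab_size=1536, d_model=768, dropout_rate=0.1,
#       num_layers=2, num_heads=2, d_kv=64, d_ff=2048,
#       feed_forward_proj="gated-gelu",
#   )
#   hidden_states, mask = encoder(tokens, tokens > 0)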
| 526 |
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
logger = logging.get_logger(__name__)

_CONFIG_FOR_DOC = "T5Config"
def shift_tokens_right(input_ids: jnp.array, pad_token_id: int, decoder_start_token_id: int) -> jnp.ndarray:
    """Shift input ids one token to the right."""
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    shifted_input_ids = jnp.where(shifted_input_ids == -1_0_0, pad_token_id, shifted_input_ids)
    return shifted_input_ids
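
# Worked example (hand-checked): with pad_token_id=0 and decoder_start_token_id=2,
# shift_tokens_right(jnp.array([[5, 7, -100]]), 0, 2) -> [[2, 5, 7]]: everything
# moves one slot to the right, the start token fills slot 0, and the -100
# label-padding sentinel is mapped back to the pad id.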
class FlaxMTaModel(FlaxTaModel):
    model_type = '''mt5'''
    config_class = MTaConfig


class FlaxMTaEncoderModel(FlaxTaEncoderModel):
    model_type = '''mt5'''
    config_class = MTaConfig


class FlaxMTaForConditionalGeneration(FlaxTaForConditionalGeneration):
    model_type = '''mt5'''
    config_class = MTaConfig
| 526 | 1 |
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class FlaubertModelTester(object):
    def __init__(self, parent, batch_size=1_3, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=9_9, n_special=0, hidden_size=3_2, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=5_1_2, type_vocab_size=1_2, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, summary_type="last", use_proj=None, scope=None, ):
        '''simple docstring'''
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
    def prepare_config_and_inputs(self):
'''simple docstring'''
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
    def get_config(self):
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
    def create_and_check_flaubert_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = FlaubertModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_flaubert_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = FlaubertWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_flaubert_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = FlaubertForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_flaubert_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = FlaubertForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask, )
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, )
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
    def create_and_check_flaubert_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        model = FlaubertForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_flaubert_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_labels = self.num_labels
        model = FlaubertForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_flaubert_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask, ):
        '''simple docstring'''
        config.num_choices = self.num_choices
        model = FlaubertForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels, )
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
def __lowerCAmelCase ( self : List[Any] ):
'''simple docstring'''
__a : Dict = self.prepare_config_and_inputs()
(
(
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) , (
__a
) ,
) : str = config_and_inputs
__a : List[str] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class FlaubertModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaubertModel,
            FlaubertWithLMHeadModel,
            FlaubertForQuestionAnswering,
            FlaubertForQuestionAnsweringSimple,
            FlaubertForSequenceClassification,
            FlaubertForTokenClassification,
            FlaubertForMultipleChoice,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": FlaubertModel,
            "fill-mask": FlaubertWithLMHeadModel,
            "question-answering": FlaubertForQuestionAnsweringSimple,
            "text-classification": FlaubertForSequenceClassification,
            "token-classification": FlaubertForTokenClassification,
            "zero-shot": FlaubertForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if (
            pipeline_test_casse_name == "QAPipelineTests"
            and tokenizer_name is not None
            and not tokenizer_name.endswith("Fast")
        ):
            # `QAPipelineTests` fails for a few models when the slower tokenizer are used.
            # (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
            # TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
            return True
        return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "FlaubertForQuestionAnswering":
                inputs_dict["start_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
                inputs_dict["end_positions"] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device
                )
        return inputs_dict
    def setUp(self):
        self.model_tester = FlaubertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FlaubertConfig, emb_dim=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_flaubert_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_model(*config_and_inputs)

    def test_flaubert_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_lm_head(*config_and_inputs)

    def test_flaubert_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_simple_qa(*config_and_inputs)

    def test_flaubert_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_qa(*config_and_inputs)

    def test_flaubert_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_sequence_classif(*config_and_inputs)

    def test_flaubert_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_token_classif(*config_and_inputs)

    def test_flaubert_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_flaubert_multiple_choice(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FlaubertModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    @slow
    @require_torch_gpu
    def test_torchscript_device_change(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # FlauBertForMultipleChoice behaves incorrectly in JIT environments.
            if model_class == FlaubertForMultipleChoice:
                return

            config.torchscript = True
            model = model_class(config=config)

            inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            traced_model = torch.jit.trace(
                model, (inputs_dict["input_ids"].to("cpu"), inputs_dict["attention_mask"].to("cpu"))
            )

            with tempfile.TemporaryDirectory() as tmp:
                torch.jit.save(traced_model, os.path.join(tmp, "traced_model.pt"))
                loaded = torch.jit.load(os.path.join(tmp, "traced_model.pt"), map_location=torch_device)
                loaded(inputs_dict["input_ids"].to(torch_device), inputs_dict["attention_mask"].to(torch_device))
@require_torch
class FlaubertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_no_head_absolute_embedding(self):
        model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")
        input_ids = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]])
        with torch.no_grad():
            output = model(input_ids)[0]
        expected_shape = torch.Size((1, 11, 768))
        self.assertEqual(output.shape, expected_shape)
        expected_slice = torch.tensor(
            [[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]]
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
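
For context, the task pipelines listed in `pipeline_model_mapping` above can drive the same checkpoint directly. A minimal sketch, assuming network access to the "flaubert/flaubert_base_cased" checkpoint exercised by the integration test:

from transformers import pipeline

# "fill-mask" maps to FlaubertWithLMHeadModel in the mapping above.
unmasker = pipeline("fill-mask", model="flaubert/flaubert_base_cased")
masked = f"Le camembert est {unmasker.tokenizer.mask_token} !"
print(unmasker(masked)[0]["token_str"])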
| 47 |
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPT2Config, GPT2LMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class UniDiffuserTextDecoder(ModelMixin, ConfigMixin, ModuleUtilsMixin):
    _keys_to_ignore_on_load_unexpected = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]

    @register_to_config
    def __init__(
        self,
        prefix_length: int,
        prefix_inner_dim: int,
        prefix_hidden_dim: Optional[int] = None,
        vocab_size: int = 50257,
        n_positions: int = 1024,
        n_embd: int = 768,
        n_layer: int = 12,
        n_head: int = 12,
        n_inner: Optional[int] = None,
        activation_function: str = "gelu_new",
        resid_pdrop: float = 0.1,
        embd_pdrop: float = 0.1,
        attn_pdrop: float = 0.1,
        layer_norm_epsilon: float = 1e-5,
        initializer_range: float = 0.02,
        scale_attn_weights: bool = True,
        use_cache: bool = True,
        scale_attn_by_inverse_layer_idx: bool = False,
        reorder_and_upcast_attn: bool = False,
    ):
        super().__init__()

        self.prefix_length = prefix_length

        if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
            raise ValueError(
                f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_inner_dim} and"
                f" `n_embd`: {n_embd} are not equal."
            )

        self.prefix_inner_dim = prefix_inner_dim
        self.prefix_hidden_dim = prefix_hidden_dim

        self.encode_prefix = (
            nn.Linear(self.prefix_inner_dim, self.prefix_hidden_dim)
            if self.prefix_hidden_dim is not None
            else nn.Identity()
        )
        self.decode_prefix = (
            nn.Linear(self.prefix_hidden_dim, n_embd) if self.prefix_hidden_dim is not None else nn.Identity()
        )

        gpt_config = GPT2Config(
            vocab_size=vocab_size,
            n_positions=n_positions,
            n_embd=n_embd,
            n_layer=n_layer,
            n_head=n_head,
            n_inner=n_inner,
            activation_function=activation_function,
            resid_pdrop=resid_pdrop,
            embd_pdrop=embd_pdrop,
            attn_pdrop=attn_pdrop,
            layer_norm_epsilon=layer_norm_epsilon,
            initializer_range=initializer_range,
            scale_attn_weights=scale_attn_weights,
            use_cache=use_cache,
            scale_attn_by_inverse_layer_idx=scale_attn_by_inverse_layer_idx,
            reorder_and_upcast_attn=reorder_and_upcast_attn,
        )
        self.transformer = GPT2LMHeadModel(gpt_config)
    def forward(
        self,
        input_ids: torch.Tensor,
        prefix_embeds: torch.Tensor,
        attention_mask: Optional[torch.Tensor] = None,
        labels: Optional[torch.Tensor] = None,
    ):
        embedding_text = self.transformer.transformer.wte(input_ids)
        hidden = self.encode_prefix(prefix_embeds)
        prefix_embeds = self.decode_prefix(hidden)
        embedding_cat = torch.cat((prefix_embeds, embedding_text), dim=1)

        if labels is not None:
            dummy_token = self.get_dummy_token(input_ids.shape[0], input_ids.device)
            labels = torch.cat((dummy_token, input_ids), dim=1)
        out = self.transformer(inputs_embeds=embedding_cat, labels=labels, attention_mask=attention_mask)
        if self.prefix_hidden_dim is not None:
            return out, hidden
        return out

    def get_dummy_token(self, batch_size: int, device: torch.device) -> torch.Tensor:
        return torch.zeros(batch_size, self.prefix_length, dtype=torch.int64, device=device)

    def encode(self, prefix):
        return self.encode_prefix(prefix)
    @torch.no_grad()
    def generate_captions(self, features, eos_token_id, device):
        features = torch.split(features, 1, dim=0)
        generated_tokens = []
        generated_seq_lengths = []
        for feature in features:
            feature = self.decode_prefix(feature.to(device))  # back to the clip feature
            # Only support beam search for now
            output_tokens, seq_lengths = self.generate_beam(
                input_embeds=feature, device=device, eos_token_id=eos_token_id
            )
            generated_tokens.append(output_tokens[0])
            generated_seq_lengths.append(seq_lengths[0])
        generated_tokens = torch.stack(generated_tokens)
        generated_seq_lengths = torch.stack(generated_seq_lengths)
        return generated_tokens, generated_seq_lengths

    @torch.no_grad()
    def generate_beam(
        self,
        input_embeds=None,
        device=None,
        input_ids=None,
        beam_size: int = 5,
        entry_length: int = 67,
        temperature: float = 1.0,
        eos_token_id: Optional[int] = None,
    ):
        stop_token_index = eos_token_id
        tokens = None
        scores = None
        seq_lengths = torch.ones(beam_size, device=device, dtype=torch.int)
        is_stopped = torch.zeros(beam_size, device=device, dtype=torch.bool)

        if input_embeds is not None:
            generated = input_embeds
        else:
            generated = self.transformer.transformer.wte(input_ids)

        for i in range(entry_length):
            outputs = self.transformer(inputs_embeds=generated)
            logits = outputs.logits
            logits = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
            logits = logits.softmax(-1).log()

            if scores is None:
                scores, next_tokens = logits.topk(beam_size, -1)
                generated = generated.expand(beam_size, *generated.shape[1:])
                next_tokens, scores = next_tokens.permute(1, 0), scores.squeeze(0)
                if tokens is None:
                    tokens = next_tokens
                else:
                    tokens = tokens.expand(beam_size, *tokens.shape[1:])
                    tokens = torch.cat((tokens, next_tokens), dim=1)
            else:
                logits[is_stopped] = -float(np.inf)
                logits[is_stopped, 0] = 0
                scores_sum = scores[:, None] + logits
                seq_lengths[~is_stopped] += 1
                scores_sum_average = scores_sum / seq_lengths[:, None]
                scores_sum_average, next_tokens = scores_sum_average.view(-1).topk(beam_size, -1)
                next_tokens_source = next_tokens // scores_sum.shape[1]
                seq_lengths = seq_lengths[next_tokens_source]
                next_tokens = next_tokens % scores_sum.shape[1]
                next_tokens = next_tokens.unsqueeze(1)
                tokens = tokens[next_tokens_source]
                tokens = torch.cat((tokens, next_tokens), dim=1)
                generated = generated[next_tokens_source]
                scores = scores_sum_average * seq_lengths
                is_stopped = is_stopped[next_tokens_source]

            next_token_embed = self.transformer.transformer.wte(next_tokens.squeeze()).view(generated.shape[0], 1, -1)
            generated = torch.cat((generated, next_token_embed), dim=1)
            is_stopped = is_stopped + next_tokens.eq(stop_token_index).squeeze()
            if is_stopped.all():
                break

        scores = scores / seq_lengths
        order = scores.argsort(descending=True)
        # tokens tensors are already padded to max_seq_length
        output_texts = [tokens[i] for i in order]
        output_texts = torch.stack(output_texts, dim=0)
        seq_lengths = torch.tensor([seq_lengths[i] for i in order], dtype=seq_lengths.dtype)
        return output_texts, seq_lengths
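
A smoke-test sketch of the decoder above, using hypothetical tiny sizes (not from the original file); it exercises the prefix projection and the beam search end to end:

import torch

decoder = UniDiffuserTextDecoder(
    prefix_length=4, prefix_inner_dim=32, prefix_hidden_dim=32, n_embd=32, n_layer=2, n_head=2
)
decoder.eval()
prefix = torch.randn(1, 4, 32)                          # (batch, prefix_length, prefix_inner_dim)
embeds = decoder.decode_prefix(decoder.encode(prefix))  # project into the GPT-2 embedding space
tokens, lengths = decoder.generate_beam(input_embeds=embeds, device="cpu", eos_token_id=50256)
print(tokens.shape, lengths)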
| 656 | 0 |
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class CLIPFeatureExtractor(CLIPImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use CLIPImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
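
Migration is a one-line change; a sketch (the checkpoint id is illustrative):

from transformers import CLIPImageProcessor

# Loads the same preprocessing configuration the deprecated class would have used.
image_processor = CLIPImageProcessor.from_pretrained("openai/clip-vit-base-patch32")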
| 416 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
UpperCAmelCase__ : Dict = logging.get_logger(__name__)
class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 416 | 1 |
'''simple docstring'''
import numpy as np
# Importing the Keras libraries and packages
import tensorflow as tf
from tensorflow.keras import layers, models
if __name__ == "__main__":
# Initialising the CNN
# (Sequential- Building the model layer by layer)
    classifier = models.Sequential()

    # Step 1 - Convolution
    # Here 64,64 is the length & breadth of dataset images and 3 is for the RGB channel
    # (3,3) is the kernel size (filter matrix)
    classifier.add(
        layers.Conv2D(32, (3, 3), input_shape=(64, 64, 3), activation="relu")
    )

    # Step 2 - Pooling
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Adding a second convolutional layer
    classifier.add(layers.Conv2D(32, (3, 3), activation="relu"))
    classifier.add(layers.MaxPooling2D(pool_size=(2, 2)))

    # Step 3 - Flattening
    classifier.add(layers.Flatten())

    # Step 4 - Full connection
    classifier.add(layers.Dense(units=128, activation="relu"))
    classifier.add(layers.Dense(units=1, activation="sigmoid"))

    # Compiling the CNN
    classifier.compile(
        optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"]
    )

    # Part 2 - Fitting the CNN to the images

    # Load Trained model weights

    # from keras.models import load_model
    # regressor=load_model('cnn.h5')

    train_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1.0 / 255, shear_range=0.2, zoom_range=0.2, horizontal_flip=True
    )

    test_datagen = tf.keras.preprocessing.image.ImageDataGenerator(rescale=1.0 / 255)

    training_set = train_datagen.flow_from_directory(
        "dataset/training_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    test_set = test_datagen.flow_from_directory(
        "dataset/test_set", target_size=(64, 64), batch_size=32, class_mode="binary"
    )

    # fit() accepts generators directly; the older fit_generator() was removed in recent TF releases.
    classifier.fit(
        training_set, steps_per_epoch=5, epochs=30, validation_data=test_set
    )

    classifier.save("cnn.h5")

    # Part 3 - Making new predictions

    test_image = tf.keras.preprocessing.image.load_img(
        "dataset/single_prediction/image.png", target_size=(64, 64)
    )
    test_image = tf.keras.preprocessing.image.img_to_array(test_image)
    test_image = test_image / 255.0  # match the 1/255 rescaling applied to the training data
    test_image = np.expand_dims(test_image, axis=0)
    result = classifier.predict(test_image)
    # training_set.class_indices
    # The sigmoid output is a probability, so threshold it rather than comparing to exact 0/1.
    prediction = "Abnormality detected" if result[0][0] > 0.5 else "Normal"
| 28 |
'''simple docstring'''
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
_VERSION_REG = re.compile(r"^(?P<major>\d+)" r"\.(?P<minor>\d+)" r"\.(?P<patch>\d+)$")


@total_ordering
@dataclass
class Version:
    version_str: str
    description: Optional[str] = None
    major: Optional[Union[str, int]] = None
    minor: Optional[Union[str, int]] = None
    patch: Optional[Union[str, int]] = None

    def __post_init__(self):
        self.major, self.minor, self.patch = _str_to_version_tuple(self.version_str)

    def __repr__(self):
        return f"{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}"

    @property
    def tuple(self):
        return self.major, self.minor, self.patch

    def _validate_operand(self, other):
        if isinstance(other, str):
            return Version(other)
        elif isinstance(other, Version):
            return other
        raise TypeError(f"{other} (type {type(other)}) cannot be compared to version.")

    def __eq__(self, other):
        try:
            other = self._validate_operand(other)
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple

    def __lt__(self, other):
        other = self._validate_operand(other)
        return self.tuple < other.tuple

    def __hash__(self):
        return hash(_version_tuple_to_str(self.tuple))

    @classmethod
    def from_dict(cls, dic):
        field_names = {f.name for f in dataclasses.fields(cls)}
        return cls(**{k: v for k, v in dic.items() if k in field_names})

    def _to_yaml_string(self) -> str:
        return self.version_str


def _str_to_version_tuple(version_str):
    res = _VERSION_REG.match(version_str)
    if not res:
        raise ValueError(f"Invalid version '{version_str}'. Format should be x.y.z with {{x,y,z}} being digits.")
    return tuple(int(v) for v in [res.group("major"), res.group("minor"), res.group("patch")])


def _version_tuple_to_str(version_tuple):
    return ".".join(str(v) for v in version_tuple)
| 421 | 0 |
import sys
N = (
"""73167176531330624919225119674426574742355349194934"""
"""96983520312774506326239578318016984801869478851843"""
"""85861560789112949495459501737958331952853208805511"""
"""12540698747158523863050715693290963295227443043557"""
"""66896648950445244523161731856403098711121722383113"""
"""62229893423380308135336276614282806444486645238749"""
"""30358907296290491560440772390713810515859307960866"""
"""70172427121883998797908792274921901699720888093776"""
"""65727333001053367881220235421809751254540594752243"""
"""52584907711670556013604839586446706324415722155397"""
"""53697817977846174064955149290862569321978468622482"""
"""83972241375657056057490261407972968652414535100474"""
"""82166370484403199890008895243450658541227588666881"""
"""16427171479924442928230863465674813919123162824586"""
"""17866458359124566529476545682848912883142607690042"""
"""24219022671055626321111109370544217506941658960408"""
"""07198403850962455444362981230987879927244284909188"""
"""84580156166097919133875499200524063689912560717606"""
"""05886116467109405077541002256983155200055935729725"""
"""71636269561882670428252483600823257530420752963450"""
)
def solution(n: str = N) -> int:
    # Find the greatest product of thirteen adjacent digits in the 1000-digit number.
    largest_product = -sys.maxsize - 1
    for i in range(len(n) - 12):
        product = 1
        for j in range(13):
            product *= int(n[i + j])
        if product > largest_product:
            largest_product = product
    return largest_product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 111 |
import tempfile
import numpy as np
import torch
from transformers import AutoTokenizer, T5EncoderModel

from diffusers import DDPMScheduler, UNet2DConditionModel
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.pipelines.deepfloyd_if import IFWatermarker
from diffusers.utils.testing_utils import torch_device
from ..test_pipelines_common import to_np
class IFPipelineTesterMixin:
    def get_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=1,
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=3,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def get_superresolution_dummy_components(self):
        torch.manual_seed(0)
        text_encoder = T5EncoderModel.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-t5")

        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            sample_size=32,
            layers_per_block=[1, 2],
            block_out_channels=[32, 64],
            down_block_types=[
                "ResnetDownsampleBlock2D",
                "SimpleCrossAttnDownBlock2D",
            ],
            mid_block_type="UNetMidBlock2DSimpleCrossAttn",
            up_block_types=["SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"],
            in_channels=6,
            out_channels=6,
            cross_attention_dim=32,
            encoder_hid_dim=32,
            attention_head_dim=8,
            addition_embed_type="text",
            addition_embed_type_num_heads=2,
            cross_attention_norm="group_norm",
            resnet_time_scale_shift="scale_shift",
            act_fn="gelu",
            class_embed_type="timestep",
            mid_block_scale_factor=1.414,
            time_embedding_act_fn="gelu",
            time_embedding_dim=32,
        )
        unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        torch.manual_seed(0)
        scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
            thresholding=True,
            dynamic_thresholding_ratio=0.95,
            sample_max_value=1.0,
            prediction_type="epsilon",
            variance_type="learned_range",
        )

        torch.manual_seed(0)
        image_noising_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_schedule="squaredcos_cap_v2",
            beta_start=0.0001,
            beta_end=0.02,
        )

        torch.manual_seed(0)
        watermarker = IFWatermarker()

        return {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "image_noising_scheduler": image_noising_scheduler,
            "watermarker": watermarker,
            "safety_checker": None,
            "feature_extractor": None,
        }
    def _test_save_load_optional_components(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        prompt = inputs["prompt"]
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]
        if "image" in inputs:
            image = inputs["image"]
        else:
            image = None
        if "mask_image" in inputs:
            mask_image = inputs["mask_image"]
        else:
            mask_image = None
        if "original_image" in inputs:
            original_image = inputs["original_image"]
        else:
            original_image = None

        prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(prompt)

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        # set all optional components to None
        for optional_component in pipe._optional_components:
            setattr(pipe, optional_component, None)

        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        for optional_component in pipe._optional_components:
            self.assertTrue(
                getattr(pipe_loaded, optional_component) is None,
                f"`{optional_component}` did not stay set to None after loading.",
            )

        inputs = self.get_dummy_inputs(torch_device)
        generator = inputs["generator"]
        num_inference_steps = inputs["num_inference_steps"]
        output_type = inputs["output_type"]

        # inputs with prompt converted to embeddings
        inputs = {
            "prompt_embeds": prompt_embeds,
            "negative_prompt_embeds": negative_prompt_embeds,
            "generator": generator,
            "num_inference_steps": num_inference_steps,
            "output_type": output_type,
        }
        if image is not None:
            inputs["image"] = image
        if mask_image is not None:
            inputs["mask_image"] = mask_image
        if original_image is not None:
            inputs["original_image"] = original_image

        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
    def _test_save_load_local(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(torch_device)
        output = pipe(**inputs)[0]

        with tempfile.TemporaryDirectory() as tmpdir:
            pipe.save_pretrained(tmpdir)
            pipe_loaded = self.pipeline_class.from_pretrained(tmpdir)
            pipe_loaded.to(torch_device)
            pipe_loaded.set_progress_bar_config(disable=None)
            pipe_loaded.unet.set_attn_processor(AttnAddedKVProcessor())  # For reproducibility tests

        inputs = self.get_dummy_inputs(torch_device)
        output_loaded = pipe_loaded(**inputs)[0]

        max_diff = np.abs(to_np(output) - to_np(output_loaded)).max()
        self.assertLess(max_diff, 1e-4)
| 111 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"bert-base-uncased": "https://huggingface.co/bert-base-uncased/resolve/main/config.json",
"bert-large-uncased": "https://huggingface.co/bert-large-uncased/resolve/main/config.json",
"bert-base-cased": "https://huggingface.co/bert-base-cased/resolve/main/config.json",
"bert-large-cased": "https://huggingface.co/bert-large-cased/resolve/main/config.json",
"bert-base-multilingual-uncased": "https://huggingface.co/bert-base-multilingual-uncased/resolve/main/config.json",
"bert-base-multilingual-cased": "https://huggingface.co/bert-base-multilingual-cased/resolve/main/config.json",
"bert-base-chinese": "https://huggingface.co/bert-base-chinese/resolve/main/config.json",
"bert-base-german-cased": "https://huggingface.co/bert-base-german-cased/resolve/main/config.json",
"bert-large-uncased-whole-word-masking": (
"https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking": (
"https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/config.json"
),
"bert-large-uncased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-large-cased-whole-word-masking-finetuned-squad": (
"https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/config.json"
),
"bert-base-cased-finetuned-mrpc": "https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/config.json",
"bert-base-german-dbmdz-cased": "https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/config.json",
"bert-base-german-dbmdz-uncased": "https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/config.json",
"cl-tohoku/bert-base-japanese": "https://huggingface.co/cl-tohoku/bert-base-japanese/resolve/main/config.json",
"cl-tohoku/bert-base-japanese-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-whole-word-masking/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char/resolve/main/config.json"
),
"cl-tohoku/bert-base-japanese-char-whole-word-masking": (
"https://huggingface.co/cl-tohoku/bert-base-japanese-char-whole-word-masking/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-cased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/config.json"
),
"TurkuNLP/bert-base-finnish-uncased-v1": (
"https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/config.json"
),
"wietsedv/bert-base-dutch-cased": "https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/config.json",
# See all BERT models at https://huggingface.co/models?filter=bert
}
class BertConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class BertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
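
Wiring the two classes together looks like this (a sketch; the task string follows OnnxConfig's task names):

config = BertConfig(num_hidden_layers=2)  # any default can be overridden
onnx_config = BertOnnxConfig(config, task="sequence-classification")
print(dict(onnx_config.inputs))
# {'input_ids': {0: 'batch', 1: 'sequence'}, 'attention_mask': {0: 'batch', 1: 'sequence'}, ...}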
| 591 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import subprocess
from packaging.version import Version, parse
from accelerate.commands.config.config_args import default_config_file, load_config_from_file
_description = "Run commands across TPU VMs for initial setup before running `accelerate launch`."
def tpu_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("tpu-config", description=_description)
    else:
        parser = argparse.ArgumentParser("Accelerate tpu-config command", description=_description)
    # Core arguments
    config_args = parser.add_argument_group(
        "Config Arguments", "Arguments that can be configured through `accelerate config`."
    )
    config_args.add_argument(
        "--config_file",
        type=str,
        default=None,
        help="Path to the config file to use for accelerate.",
    )
    config_args.add_argument(
        "--tpu_name",
        default=None,
        help="The name of the TPU to use. If not specified, will use the TPU specified in the config file.",
    )
    config_args.add_argument(
        "--tpu_zone",
        default=None,
        help="The zone of the TPU to use. If not specified, will use the zone specified in the config file.",
    )
    pod_args = parser.add_argument_group("TPU Arguments", "Arguments for options ran inside the TPU.")
    pod_args.add_argument(
        "--use_alpha",
        action="store_true",
        help="Whether to use `gcloud alpha` when running the TPU training script instead of `gcloud`.",
    )
    pod_args.add_argument(
        "--command_file",
        default=None,
        help="The path to the file containing the commands to run on the pod on startup.",
    )
    pod_args.add_argument(
        "--command",
        action="append",
        nargs="+",
        help="A command to run on the pod. Can be passed multiple times.",
    )
    pod_args.add_argument(
        "--install_accelerate",
        action="store_true",
        help="Whether to install accelerate on the pod. Defaults to False.",
    )
    pod_args.add_argument(
        "--accelerate_version",
        default="latest",
        help="The version of accelerate to install on the pod. If not specified, will use the latest pypi version. Specify 'dev' to install from GitHub.",
    )
    pod_args.add_argument(
        "--debug", action="store_true", help="If set, will print the command that would be run instead of running it."
    )

    if subparsers is not None:
        parser.set_defaults(func=tpu_command_launcher)
    return parser
def tpu_command_launcher(args):
    defaults = None

    # Get the default from the config file if it exists.
    if args.config_file is not None or os.path.isfile(default_config_file):
        defaults = load_config_from_file(args.config_file)
        if not args.command_file and defaults.command_file is not None and not args.command:
            args.command_file = defaults.command_file
        if not args.command and defaults.commands is not None:
            args.command = defaults.commands
        if not args.tpu_name:
            args.tpu_name = defaults.tpu_name
        if not args.tpu_zone:
            args.tpu_zone = defaults.tpu_zone
    if args.accelerate_version == "dev":
        args.accelerate_version = "git+https://github.com/huggingface/accelerate.git"
    elif args.accelerate_version == "latest":
        args.accelerate_version = "accelerate -U"
    elif isinstance(parse(args.accelerate_version), Version):
        args.accelerate_version = f"accelerate=={args.accelerate_version}"

    if not args.command_file and not args.command:
        raise ValueError("You must specify either a command file or a command to run on the pod.")

    if args.command_file:
        with open(args.command_file, "r") as f:
            args.command = [f.read().splitlines()]

    # To turn list of lists into list of strings
    if isinstance(args.command[0], list):
        args.command = [line for cmd in args.command for line in cmd]

    # Default to the shared folder and install accelerate
    new_cmd = ["cd /usr/share"]
    if args.install_accelerate:
        new_cmd += [f"pip install {args.accelerate_version}"]
    new_cmd += args.command
    args.command = "; ".join(new_cmd)

    # Then send it to gcloud
    # Eventually try to use google-api-core to do this instead of subprocess
    cmd = ["gcloud"]
    if args.use_alpha:
        cmd += ["alpha"]
    cmd += [
        "compute",
        "tpus",
        "tpu-vm",
        "ssh",
        args.tpu_name,
        "--zone",
        args.tpu_zone,
        "--command",
        args.command,
        "--worker",
        "all",
    ]
    if args.debug:
        print(f"Running {' '.join(cmd)}")
        return
    subprocess.run(cmd)
    print("Successfully setup pod.")
def main():
    parser = tpu_command_parser()
    args = parser.parse_args()
    tpu_command_launcher(args)
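
Invocation sketch, using the flag names defined by the parser above (the TPU name and zone are placeholders):

accelerate tpu-config \
    --command "pip install -r requirements.txt" --command "accelerate launch train.py" \
    --tpu_name my-tpu --tpu_zone us-central1-a --install_accelerate --debug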
| 591 | 1 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)


def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in one pass (Dijkstra's three-way partition)."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)
    low = 0
    high = len(sequence) - 1
    mid = 0
    # Invariant: sequence[:low] holds reds, sequence[low:mid] whites,
    # sequence[high + 1:] blues; sequence[mid:high + 1] is still unexamined.
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contains only {colors} values"
            raise ValueError(msg)
    return sequence
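
Quick checks of the partition (illustrative inputs):

assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]
assert dutch_national_flag_sort([]) == []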
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = input('''Enter numbers separated by commas:\n''').strip()
a_ = [int(item.strip()) for item in user_input.split(''',''')]
print(F"{dutch_national_flag_sort(unsorted)}")
| 115 |
import argparse
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--txt2img_unclip''',
default='''kakaobrain/karlo-v1-alpha''',
type=str,
required=False,
help='''The pretrained txt2img unclip.''',
)
args = parser.parse_args()

txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

feature_extractor = CLIPImageProcessor()
image_encoder = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-large-patch14")

img2img = UnCLIPImageVariationPipeline(
    decoder=txt2img.decoder,
    text_encoder=txt2img.text_encoder,
    tokenizer=txt2img.tokenizer,
    text_proj=txt2img.text_proj,
    feature_extractor=feature_extractor,
    image_encoder=image_encoder,
    super_res_first=txt2img.super_res_first,
    super_res_last=txt2img.super_res_last,
    decoder_scheduler=txt2img.decoder_scheduler,
    super_res_scheduler=txt2img.super_res_scheduler,
)

img2img.save_pretrained(args.dump_path)
| 115 | 1 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/text-classification/requirements.txt""")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    max_seq_length: Optional[int] = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=True,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        default=None, metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    language: str = field(
        default=None, metadata={"help": "Evaluation language. Also train language if `train_language` is set to None."}
    )
    train_language: Optional[str] = field(
        default=None, metadata={"help": "Train language if it is different from the evaluation language."}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    do_lower_case: Optional[bool] = field(
        default=False,
        metadata={"help": "arg to indicate if tokenizer should do lower case in AutoTokenizer.from_pretrained()"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
    ignore_mismatched_sizes: bool = field(
        default=False,
        metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."},
    )
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
    # information sent is the one passed as arguments along with your Python/PyTorch versions.
    send_example_telemetry("run_xnli", model_args)

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )

    if training_args.should_log:
        # The default of training_args.log_level is passive, so we set log level at info here to have that default.
        transformers.utils.logging.set_verbosity_info()

    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.enable_default_handler()
    transformers.utils.logging.enable_explicit_format()

    # Log on each process the small summary:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"
        + f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    logger.info(f"Training/evaluation parameters {training_args}")
    # Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and len(os.listdir(training_args.output_dir)) > 0:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome."
            )
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
    # download the dataset.
    # Downloading and loading xnli dataset from the hub.
    if training_args.do_train:
        if model_args.train_language is None:
            train_dataset = load_dataset(
                "xnli",
                model_args.language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        else:
            train_dataset = load_dataset(
                "xnli",
                model_args.train_language,
                split="train",
                cache_dir=model_args.cache_dir,
                use_auth_token=True if model_args.use_auth_token else None,
            )
        label_list = train_dataset.features["label"].names

    if training_args.do_eval:
        eval_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="validation",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = eval_dataset.features["label"].names

    if training_args.do_predict:
        predict_dataset = load_dataset(
            "xnli",
            model_args.language,
            split="test",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
        label_list = predict_dataset.features["label"].names

    # Labels
    num_labels = len(label_list)

    # Load pretrained model and tokenizer
    # In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
    # download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        id2label={str(i): label for i, label in enumerate(label_list)},
        label2id={label: i for i, label in enumerate(label_list)},
        finetuning_task="xnli",
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        do_lower_case=model_args.do_lower_case,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    model = AutoModelForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        ignore_mismatched_sizes=model_args.ignore_mismatched_sizes,
    )
    # Preprocessing the datasets
    # Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    def preprocess_function(examples):
        # Tokenize the texts
        return tokenizer(
            examples["premise"],
            examples["hypothesis"],
            padding=padding,
            max_length=data_args.max_seq_length,
            truncation=True,
        )
    if training_args.do_train:
        if data_args.max_train_samples is not None:
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        with training_args.main_process_first(desc="train dataset map pre-processing"):
            train_dataset = train_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on train dataset",
            )
        # Log a few random samples from the training set:
        for index in random.sample(range(len(train_dataset)), 3):
            logger.info(f"Sample {index} of the training set: {train_dataset[index]}.")

    if training_args.do_eval:
        if data_args.max_eval_samples is not None:
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        with training_args.main_process_first(desc="validation dataset map pre-processing"):
            eval_dataset = eval_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on validation dataset",
            )

    if training_args.do_predict:
        if data_args.max_predict_samples is not None:
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        with training_args.main_process_first(desc="prediction dataset map pre-processing"):
            predict_dataset = predict_dataset.map(
                preprocess_function,
                batched=True,
                load_from_cache_file=not data_args.overwrite_cache,
                desc="Running tokenizer on prediction dataset",
            )
    # Get the metric function
    metric = evaluate.load("xnli")

    # You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
    # predictions and label_ids field) and has to return a dictionary string to float.
    def compute_metrics(p: EvalPrediction):
        preds = p.predictions[0] if isinstance(p.predictions, tuple) else p.predictions
        preds = np.argmax(preds, axis=1)
        return metric.compute(predictions=preds, references=p.label_ids)
    # Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
    if data_args.pad_to_max_length:
        data_collator = default_data_collator
    elif training_args.fp16:
        data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8)
    else:
        data_collator = None

    # Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        compute_metrics=compute_metrics,
        tokenizer=tokenizer,
        data_collator=data_collator,
    )

    # Training
    if training_args.do_train:
        checkpoint = None
        if training_args.resume_from_checkpoint is not None:
            checkpoint = training_args.resume_from_checkpoint
        elif last_checkpoint is not None:
            checkpoint = last_checkpoint
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        metrics = train_result.metrics
        max_train_samples = (
            data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
        )
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))
        trainer.save_model()  # Saves the tokenizer too for easy upload
        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate(eval_dataset=eval_dataset)
        max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
        metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    # Prediction
    if training_args.do_predict:
        logger.info("*** Predict ***")
        predictions, labels, metrics = trainer.predict(predict_dataset, metric_key_prefix="predict")
        max_predict_samples = (
            data_args.max_predict_samples if data_args.max_predict_samples is not None else len(predict_dataset)
        )
        metrics["predict_samples"] = min(max_predict_samples, len(predict_dataset))
        trainer.log_metrics("predict", metrics)
        trainer.save_metrics("predict", metrics)
        predictions = np.argmax(predictions, axis=1)
        output_predict_file = os.path.join(training_args.output_dir, "predictions.txt")
        if trainer.is_world_process_zero():
            with open(output_predict_file, "w") as writer:
                writer.write("index\tprediction\n")
                for index, item in enumerate(predictions):
                    item = label_list[item]
                    writer.write(f"{index}\t{item}\n")
if __name__ == "__main__":
main()
| 372 |
'''simple docstring'''
alphabet_size = 256
# Modulus to hash a string
modulus = 1000003
def rabin_karp(pattern: str, text: str) -> bool:
    p_len = len(pattern)
    t_len = len(text)
    if p_len > t_len:
        return False

    p_hash = 0
    text_hash = 0
    modulus_power = 1

    # Calculating the hash of pattern and substring of text
    for i in range(p_len):
        p_hash = (ord(pattern[i]) + p_hash * alphabet_size) % modulus
        text_hash = (ord(text[i]) + text_hash * alphabet_size) % modulus
        if i == p_len - 1:
            continue
        modulus_power = (modulus_power * alphabet_size) % modulus

    for i in range(0, t_len - p_len + 1):
        if text_hash == p_hash and text[i : i + p_len] == pattern:
            return True
        if i == t_len - p_len:
            continue
        # Calculate the https://en.wikipedia.org/wiki/Rolling_hash
        text_hash = (
            (text_hash - ord(text[i]) * modulus_power) * alphabet_size
            + ord(text[i + p_len])
        ) % modulus
    return False
def test_rabin_karp() -> None:
    # Test 1)
    pattern = "abc1abc12"
    text1 = "alskfjaldsabc1abc1abc12k23adsfabcabc"
    text2 = "alskfjaldsk23adsfabcabc"
    assert rabin_karp(pattern, text1) and not rabin_karp(pattern, text2)

    # Test 2)
    pattern = "ABABX"
    text = "ABABZABABYABABX"
    assert rabin_karp(pattern, text)

    # Test 3)
    pattern = "AAAB"
    text = "ABAAAAAB"
    assert rabin_karp(pattern, text)

    # Test 4)
    pattern = "abcdabcy"
    text = "abcxabcdabxabcdabcdabcy"
    assert rabin_karp(pattern, text)

    # Test 5)
    pattern = "Lü"
    text = "Lüsai"
    assert rabin_karp(pattern, text)
    pattern = "Lue"
    assert not rabin_karp(pattern, text)

    print("Success.")
if __name__ == "__main__":
test_rabin_karp()
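
For intuition, the rolling update can be checked by hand on a two-character window; a standalone sketch reusing the constants above:

def _rolling_hash_demo() -> None:
    # Earlier characters are the most significant base-`alphabet_size` digits, so rolling
    # "ab" -> "bc" subtracts ord("a") * alphabet_size, shifts by one base, and adds ord("c").
    h_ab = (ord("b") + ord("a") * alphabet_size) % modulus
    h_bc = ((h_ab - ord("a") * alphabet_size) * alphabet_size + ord("c")) % modulus
    assert h_bc == (ord("c") + ord("b") * alphabet_size) % modulus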
| 372 | 1 |
'''simple docstring'''
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
lowercase : Any = logging.get_logger(__name__)
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Optional[int] = WavaVecaForSequenceClassification.from_pretrained(snake_case__ , config=snake_case__ )
A : int = downstream_dict['''projector.weight''']
A : Optional[Any] = downstream_dict['''projector.bias''']
A : str = downstream_dict['''model.post_net.linear.weight''']
A : Optional[Any] = downstream_dict['''model.post_net.linear.bias''']
return model
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Any = WavaVecaForAudioFrameClassification.from_pretrained(snake_case__ , config=snake_case__ )
A : List[str] = downstream_dict['''model.linear.weight''']
A : int = downstream_dict['''model.linear.bias''']
return model
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : Union[str, Any] = WavaVecaForXVector.from_pretrained(snake_case__ , config=snake_case__ )
A : Union[str, Any] = downstream_dict['''connector.weight''']
A : Union[str, Any] = downstream_dict['''connector.bias''']
for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
A : Tuple = downstream_dict[
F'model.framelevel_feature_extractor.module.{i}.kernel.weight'
]
A : int = downstream_dict[F'model.framelevel_feature_extractor.module.{i}.kernel.bias']
A : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.weight''']
A : List[str] = downstream_dict['''model.utterancelevel_feature_extractor.linear1.bias''']
A : Union[str, Any] = downstream_dict['''model.utterancelevel_feature_extractor.linear2.weight''']
A : Dict = downstream_dict['''model.utterancelevel_feature_extractor.linear2.bias''']
A : Any = downstream_dict['''objective.W''']
return model
@torch.no_grad()
def lowerCAmelCase_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ ):
'''simple docstring'''
A : int = torch.load(snake_case__ , map_location='''cpu''' )
A : int = checkpoint['''Downstream''']
A : Any = WavaVecaConfig.from_pretrained(snake_case__ )
A : Optional[Any] = WavaVecaFeatureExtractor.from_pretrained(
snake_case__ , return_attention_mask=snake_case__ , do_normalize=snake_case__ )
A : List[str] = hf_config.architectures[0]
if arch.endswith('''ForSequenceClassification''' ):
A : int = convert_classification(snake_case__ , snake_case__ , snake_case__ )
elif arch.endswith('''ForAudioFrameClassification''' ):
A : int = convert_diarization(snake_case__ , snake_case__ , snake_case__ )
elif arch.endswith('''ForXVector''' ):
A : str = convert_xvector(snake_case__ , snake_case__ , snake_case__ )
else:
raise NotImplementedError(F'S3PRL weights conversion is not supported for {arch}' )
if hf_config.use_weighted_layer_sum:
A : int = checkpoint['''Featurizer''']['''weights''']
hf_feature_extractor.save_pretrained(snake_case__ )
hf_model.save_pretrained(snake_case__ )
if __name__ == "__main__":
lowercase : str = argparse.ArgumentParser()
parser.add_argument(
'--base_model_name', default=None, type=str, help='Name of the huggingface pretrained base model.'
)
parser.add_argument('--config_path', default=None, type=str, help='Path to the huggingface classifier config.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to the s3prl checkpoint.')
parser.add_argument('--model_dump_path', default=None, type=str, help='Path to the final converted model.')
lowercase : Any = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
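
# Example invocation of the converter above (every path below is a hypothetical
# placeholder; the base model name is a real Hub checkpoint):
#
#   python convert_s3prl_checkpoint.py \
#       --base_model_name facebook/wav2vec2-base \
#       --config_path ./classifier_config.json \
#       --checkpoint_path ./s3prl_downstream.ckpt \
#       --model_dump_path ./converted_model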
| 700 |
'''simple docstring'''
def heaps(arr: list) -> list:
    """
    Pure Python implementation of Heap's algorithm, returning all
    permutations of a list.
    """
    if len(arr) <= 1:
        return [tuple(arr)]

    res = []

    def generate(k: int, arr: list):
        if k == 1:
            res.append(tuple(arr[:]))
            return

        generate(k - 1, arr)

        for i in range(k - 1):
            if k % 2 == 0:  # k is even
                arr[i], arr[k - 1] = arr[k - 1], arr[i]
            else:  # k is odd
                arr[0], arr[k - 1] = arr[k - 1], arr[0]
            generate(k - 1, arr)

    generate(len(arr), arr)
    return res


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    arr = [int(item) for item in user_input.split(",")]
    print(heaps(arr))
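
# Usage sketch: Heap's algorithm yields each of the n! orderings exactly once
# (result order follows the swap sequence, not lexicographic order).
def _heaps_demo() -> None:
    perms = heaps([1, 2, 3])
    assert len(perms) == 6
    assert set(perms) == {(1, 2, 3), (1, 3, 2), (2, 1, 3), (2, 3, 1), (3, 1, 2), (3, 2, 1)}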
| 343 | 0 |
"""simple docstring"""
import warnings
from transformers import AutoTokenizer
from transformers.utils import is_torch_available
from transformers.utils.generic import ExplicitEnum
from ...processing_utils import ProcessorMixin
if is_torch_available():
import torch
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Tuple = "char"
a__ : List[Any] = "bpe"
a__ : str = "wp"
_lowercase : int = (DecodeType.CHARACTER, DecodeType.BPE, DecodeType.WORDPIECE)
class _UpperCAmelCase ( _lowerCAmelCase ):
a__ : Optional[Any] = ["image_processor", "char_tokenizer"]
a__ : Dict = "ViTImageProcessor"
a__ : Dict = "MgpstrTokenizer"
def __init__( self : Union[str, Any] , _lowercase : Optional[int]=None , _lowercase : Union[str, Any]=None , **_lowercase : List[str] ):
__UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
'''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
''' instead.''' , _lowercase , )
__UpperCAmelCase = kwargs.pop('''feature_extractor''' )
__UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('''You need to specify an `image_processor`.''' )
if tokenizer is None:
raise ValueError('''You need to specify a `tokenizer`.''' )
__UpperCAmelCase = tokenizer
__UpperCAmelCase = AutoTokenizer.from_pretrained('''gpt2''' )
__UpperCAmelCase = AutoTokenizer.from_pretrained('''bert-base-uncased''' )
super().__init__(_lowercase , _lowercase )
def __call__( self : Union[str, Any] , _lowercase : Optional[int]=None , _lowercase : Optional[int]=None , _lowercase : Tuple=None , **_lowercase : Tuple ):
if images is None and text is None:
raise ValueError('''You need to specify either an `images` or `text` input to process.''' )
if images is not None:
__UpperCAmelCase = self.image_processor(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is not None:
__UpperCAmelCase = self.char_tokenizer(_lowercase , return_tensors=_lowercase , **_lowercase )
if text is None:
return inputs
elif images is None:
return encodings
else:
__UpperCAmelCase = encodings['''input_ids''']
return inputs
def a ( self : Tuple , _lowercase : Tuple ):
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = sequences
__UpperCAmelCase = char_preds.size(0 )
__UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''char''' )
__UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''bpe''' )
__UpperCAmelCase , __UpperCAmelCase = self._decode_helper(_lowercase , '''wp''' )
__UpperCAmelCase = []
__UpperCAmelCase = []
for i in range(_lowercase ):
__UpperCAmelCase = [char_scores[i], bpe_scores[i], wp_scores[i]]
__UpperCAmelCase = [char_strs[i], bpe_strs[i], wp_strs[i]]
__UpperCAmelCase = scores.index(max(_lowercase ) )
final_strs.append(strs[max_score_index] )
final_scores.append(scores[max_score_index] )
__UpperCAmelCase = {}
__UpperCAmelCase = final_strs
__UpperCAmelCase = final_scores
__UpperCAmelCase = char_strs
__UpperCAmelCase = bpe_strs
__UpperCAmelCase = wp_strs
return out
def a ( self : Union[str, Any] , _lowercase : Any , _lowercase : str ):
if format == DecodeType.CHARACTER:
__UpperCAmelCase = self.char_decode
__UpperCAmelCase = 1
__UpperCAmelCase = '''[s]'''
elif format == DecodeType.BPE:
__UpperCAmelCase = self.bpe_decode
__UpperCAmelCase = 2
__UpperCAmelCase = '''#'''
elif format == DecodeType.WORDPIECE:
__UpperCAmelCase = self.wp_decode
__UpperCAmelCase = 1_02
__UpperCAmelCase = '''[SEP]'''
else:
raise ValueError(F'''Format {format} is not supported.''' )
__UpperCAmelCase , __UpperCAmelCase = [], []
__UpperCAmelCase = pred_logits.size(0 )
__UpperCAmelCase = pred_logits.size(1 )
__UpperCAmelCase , __UpperCAmelCase = pred_logits.topk(1 , dim=-1 , largest=_lowercase , sorted=_lowercase )
__UpperCAmelCase = preds_index.view(-1 , _lowercase )[:, 1:]
__UpperCAmelCase = decoder(_lowercase )
__UpperCAmelCase , __UpperCAmelCase = torch.nn.functional.softmax(_lowercase , dim=2 ).max(dim=2 )
__UpperCAmelCase = preds_max_prob[:, 1:]
for index in range(_lowercase ):
__UpperCAmelCase = preds_str[index].find(_lowercase )
__UpperCAmelCase = preds_str[index][:pred_eos]
__UpperCAmelCase = preds_index[index].cpu().tolist()
__UpperCAmelCase = pred_index.index(_lowercase ) if eos_token in pred_index else -1
__UpperCAmelCase = preds_max_prob[index][: pred_eos_index + 1]
__UpperCAmelCase = pred_max_prob.cumprod(dim=0 )[-1] if pred_max_prob.nelement() != 0 else 0.0
dec_strs.append(_lowercase )
conf_scores.append(_lowercase )
return dec_strs, conf_scores
def a ( self : List[Any] , _lowercase : int ):
__UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.char_tokenizer.batch_decode(_lowercase )]
return decode_strs
def a ( self : Optional[Any] , _lowercase : str ):
return self.bpe_tokenizer.batch_decode(_lowercase )
def a ( self : int , _lowercase : str ):
__UpperCAmelCase = [seq.replace(''' ''' , '''''' ) for seq in self.wp_tokenizer.batch_decode(_lowercase )]
return decode_strs
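
# A hedged usage sketch for the processor above ("alibaba-damo/mgp-str-base" is
# the published MGP-STR checkpoint; the image path is a hypothetical placeholder):
#
#   from PIL import Image
#   from transformers import MgpstrForSceneTextRecognition, MgpstrProcessor
#
#   processor = MgpstrProcessor.from_pretrained("alibaba-damo/mgp-str-base")
#   model = MgpstrForSceneTextRecognition.from_pretrained("alibaba-damo/mgp-str-base")
#   pixel_values = processor(images=Image.open("word.png").convert("RGB"), return_tensors="pt").pixel_values
#   outputs = model(pixel_values)
#   text = processor.batch_decode(outputs.logits)["generated_text"]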
| 49 |
"""simple docstring"""
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
def a ( self : Union[str, Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Optional[Any] ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = tokenizer.decode(greedy_ids[0] )
__UpperCAmelCase = TextIteratorStreamer(_lowercase )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
self.assertEqual(_lowercase , _lowercase )
def a ( self : str ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase )
__UpperCAmelCase = greedy_ids[:, input_ids.shape[1] :]
__UpperCAmelCase = tokenizer.decode(new_greedy_ids[0] )
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_prompt=_lowercase )
model.generate(_lowercase , max_new_tokens=10 , do_sample=_lowercase , streamer=_lowercase )
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
__UpperCAmelCase = cs.out[:-1]
self.assertEqual(_lowercase , _lowercase )
def a ( self : Tuple ):
# Tests that we can pass `decode_kwargs` to the streamer to control how the tokens are decoded. Must be tested
# with actual models -- the dummy models' tokenizers are not aligned with their models, and
# `skip_special_tokens=True` has no effect on them
__UpperCAmelCase = AutoTokenizer.from_pretrained('''distilgpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''distilgpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = torch.ones((1, 5) , device=_lowercase ).long() * model.config.bos_token_id
with CaptureStdout() as cs:
__UpperCAmelCase = TextStreamer(_lowercase , skip_special_tokens=_lowercase )
model.generate(_lowercase , max_new_tokens=1 , do_sample=_lowercase , streamer=_lowercase )
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
__UpperCAmelCase = cs.out[:-1] # Remove the final "\n"
__UpperCAmelCase = tokenizer(_lowercase , return_tensors='''pt''' )
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1) )
def a ( self : Tuple ):
__UpperCAmelCase = AutoTokenizer.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' )
__UpperCAmelCase = AutoModelForCausalLM.from_pretrained('''hf-internal-testing/tiny-random-gpt2''' ).to(_lowercase )
__UpperCAmelCase = -1
__UpperCAmelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size ).to(_lowercase )
__UpperCAmelCase = TextIteratorStreamer(_lowercase , timeout=0.001 )
__UpperCAmelCase = {'''input_ids''': input_ids, '''max_new_tokens''': 10, '''do_sample''': False, '''streamer''': streamer}
__UpperCAmelCase = Thread(target=model.generate , kwargs=_lowercase )
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(_lowercase ):
__UpperCAmelCase = ''''''
for new_text in streamer:
streamer_text += new_text
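
# The production pattern these tests exercise, as a hedged sketch: generate()
# runs on a worker thread while the main thread consumes text as it arrives.
#
#   tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   model = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2")
#   streamer = TextIteratorStreamer(tokenizer)
#   inputs = tokenizer(["Hello"], return_tensors="pt")
#   Thread(target=model.generate, kwargs={**inputs, "max_new_tokens": 20, "streamer": streamer}).start()
#   for chunk in streamer:
#       print(chunk, end="", flush=True)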
| 49 | 1 |
"""simple docstring"""
import argparse

from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers import UnCLIPImageVariationPipeline, UnCLIPPipeline


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--txt2img_unclip',
        default='kakaobrain/karlo-v1-alpha',
        type=str,
        required=False,
        help='The pretrained txt2img unclip.',
    )
    args = parser.parse_args()

    txt2img = UnCLIPPipeline.from_pretrained(args.txt2img_unclip)

    feature_extractor = CLIPImageProcessor()
    image_encoder = CLIPVisionModelWithProjection.from_pretrained('openai/clip-vit-large-patch14')

    img2img = UnCLIPImageVariationPipeline(
        decoder=txt2img.decoder,
        text_encoder=txt2img.text_encoder,
        tokenizer=txt2img.tokenizer,
        text_proj=txt2img.text_proj,
        feature_extractor=feature_extractor,
        image_encoder=image_encoder,
        super_res_first=txt2img.super_res_first,
        super_res_last=txt2img.super_res_last,
        decoder_scheduler=txt2img.decoder_scheduler,
        super_res_scheduler=txt2img.super_res_scheduler,
    )

    img2img.save_pretrained(args.dump_path)
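
# Example invocation (the output directory and the script name are hypothetical
# placeholders; the default txt2img checkpoint is real):
#
#   python convert_unclip_txt2img_to_image_variation.py \
#       --txt2img_unclip kakaobrain/karlo-v1-alpha \
#       --dump_path ./karlo-image-variations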
| 254 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
UpperCamelCase__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
'vocab_file': {
'unc-nlp/lxmert-base-uncased': 'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt',
},
'tokenizer_file': {
'unc-nlp/lxmert-base-uncased': (
'https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json'
),
},
}
UpperCamelCase__ = {
'unc-nlp/lxmert-base-uncased': 5_12,
}
UpperCamelCase__ = {
'unc-nlp/lxmert-base-uncased': {'do_lower_case': True},
}
class a ( lowercase ):
UpperCamelCase : int = VOCAB_FILES_NAMES
UpperCamelCase : str = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase : Any = PRETRAINED_INIT_CONFIGURATION
UpperCamelCase : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase : List[Any] = LxmertTokenizer
def __init__( self , UpperCamelCase_=None , UpperCamelCase_=None , UpperCamelCase_=True , UpperCamelCase_="[UNK]" , UpperCamelCase_="[SEP]" , UpperCamelCase_="[PAD]" , UpperCamelCase_="[CLS]" , UpperCamelCase_="[MASK]" , UpperCamelCase_=True , UpperCamelCase_=None , **UpperCamelCase_ , ):
super().__init__(
UpperCamelCase_ , tokenizer_file=UpperCamelCase_ , do_lower_case=UpperCamelCase_ , unk_token=UpperCamelCase_ , sep_token=UpperCamelCase_ , pad_token=UpperCamelCase_ , cls_token=UpperCamelCase_ , mask_token=UpperCamelCase_ , tokenize_chinese_chars=UpperCamelCase_ , strip_accents=UpperCamelCase_ , **UpperCamelCase_ , )
UpperCAmelCase__ : Any = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get('lowercase' , UpperCamelCase_ ) != do_lower_case
or normalizer_state.get('strip_accents' , UpperCamelCase_ ) != strip_accents
or normalizer_state.get('handle_chinese_chars' , UpperCamelCase_ ) != tokenize_chinese_chars
):
UpperCAmelCase__ : Any = getattr(UpperCamelCase_ , normalizer_state.pop('type' ) )
UpperCAmelCase__ : Union[str, Any] = do_lower_case
UpperCAmelCase__ : Optional[int] = strip_accents
UpperCAmelCase__ : Optional[Any] = tokenize_chinese_chars
UpperCAmelCase__ : Dict = normalizer_class(**UpperCamelCase_ )
UpperCAmelCase__ : List[Any] = do_lower_case
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_=None ):
UpperCAmelCase__ : Union[str, Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : str = [self.sep_token_id]
UpperCAmelCase__ : Union[str, Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __snake_case ( self , UpperCamelCase_ , UpperCamelCase_ = None ):
UpperCAmelCase__ : Optional[Any] = self._tokenizer.model.save(UpperCamelCase_ , name=UpperCamelCase_ )
return tuple(UpperCamelCase_ )
| 254 | 1 |
from __future__ import annotations
def find_max(nums: list[int | float], left: int, right: int) -> int | float:
    """
    Divide and conquer: find the maximum of nums[left:right + 1].

    >>> find_max([3, 1, 4, 1, 5], 0, 4)
    5
    """
    if len(nums) == 0:
        raise ValueError('find_max() arg is an empty sequence')
    if (
        left >= len(nums)
        or left < -len(nums)
        or right >= len(nums)
        or right < -len(nums)
    ):
        raise IndexError('list index out of range')
    if left == right:
        return nums[left]
    mid = (left + right) >> 1  # the middle
    left_max = find_max(nums, left, mid)  # find max in range[left, mid]
    right_max = find_max(nums, mid + 1, right)  # find max in range[mid + 1, right]
    return left_max if left_max >= right_max else right_max
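

# Usage sketch: search a whole list by passing the full index range.
def _find_max_demo() -> None:
    nums = [3, 1, 4, 1, 5, 9, 2, 6]
    assert find_max(nums, 0, len(nums) - 1) == 9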
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 652 |
import math
def proth(number: int) -> int:
    """
    Return the `number`-th Proth number (OEIS A080075).

    >>> proth(6)
    25
    """
    if not isinstance(number, int):
        msg = F"Input value of [number={number}] must be an integer"
        raise TypeError(msg)
    if number < 1:
        msg = F"Input value of [number={number}] must be > 0"
        raise ValueError(msg)
    elif number == 1:
        return 3
    elif number == 2:
        return 5
    else:
        # All Proth numbers up to the requested index fall within this many
        # doubling "blocks" of the sequence
        block_index = int(math.log(number // 3, 2)) + 2
        proth_list = [3, 5]
        proth_index = 2
        increment = 3
        for block in range(1, block_index):
            for _ in range(increment):
                proth_list.append(2 ** (block + 1) + proth_list[proth_index - 1])
                proth_index += 1
            increment *= 2
    return proth_list[number - 1]


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    for number in range(11):
        value = 0
        try:
            value = proth(number)
        except ValueError:
            print(F"""ValueError: there is no {number}th Proth number""")
            continue
        print(F"""The {number}th Proth number: {value}""")
| 652 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
_lowerCAmelCase : Dict = {
"albert-base-v1": "https://huggingface.co/albert-base-v1/resolve/main/config.json",
"albert-large-v1": "https://huggingface.co/albert-large-v1/resolve/main/config.json",
"albert-xlarge-v1": "https://huggingface.co/albert-xlarge-v1/resolve/main/config.json",
"albert-xxlarge-v1": "https://huggingface.co/albert-xxlarge-v1/resolve/main/config.json",
"albert-base-v2": "https://huggingface.co/albert-base-v2/resolve/main/config.json",
"albert-large-v2": "https://huggingface.co/albert-large-v2/resolve/main/config.json",
"albert-xlarge-v2": "https://huggingface.co/albert-xlarge-v2/resolve/main/config.json",
"albert-xxlarge-v2": "https://huggingface.co/albert-xxlarge-v2/resolve/main/config.json",
}
class _UpperCamelCase ( UpperCAmelCase__ ):
UpperCAmelCase_ = 'albert'
def __init__( self :Dict , lowerCamelCase :List[str]=3_0000 , lowerCamelCase :List[Any]=128 , lowerCamelCase :List[str]=4096 , lowerCamelCase :str=12 , lowerCamelCase :str=1 , lowerCamelCase :Tuple=64 , lowerCamelCase :Dict=1_6384 , lowerCamelCase :int=1 , lowerCamelCase :str="gelu_new" , lowerCamelCase :Dict=0 , lowerCamelCase :Optional[Any]=0 , lowerCamelCase :str=512 , lowerCamelCase :Optional[int]=2 , lowerCamelCase :List[Any]=0.02 , lowerCamelCase :Union[str, Any]=1e-12 , lowerCamelCase :Tuple=0.1 , lowerCamelCase :List[Any]="absolute" , lowerCamelCase :List[Any]=0 , lowerCamelCase :int=2 , lowerCamelCase :Optional[int]=3 , **lowerCamelCase :int , ) -> Tuple:
super().__init__(pad_token_id=lowerCamelCase , bos_token_id=lowerCamelCase , eos_token_id=lowerCamelCase , **lowerCamelCase )
UpperCAmelCase__ = vocab_size
UpperCAmelCase__ = embedding_size
UpperCAmelCase__ = hidden_size
UpperCAmelCase__ = num_hidden_layers
UpperCAmelCase__ = num_hidden_groups
UpperCAmelCase__ = num_attention_heads
UpperCAmelCase__ = inner_group_num
UpperCAmelCase__ = hidden_act
UpperCAmelCase__ = intermediate_size
UpperCAmelCase__ = hidden_dropout_prob
UpperCAmelCase__ = attention_probs_dropout_prob
UpperCAmelCase__ = max_position_embeddings
UpperCAmelCase__ = type_vocab_size
UpperCAmelCase__ = initializer_range
UpperCAmelCase__ = layer_norm_eps
UpperCAmelCase__ = classifier_dropout_prob
UpperCAmelCase__ = position_embedding_type
class _UpperCamelCase ( UpperCAmelCase__ ):
@property
def UpperCAmelCase_ ( self :Optional[int] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
UpperCAmelCase__ = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
UpperCAmelCase__ = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
("token_type_ids", dynamic_axis),
] )
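
# Usage sketch: per the transformers documentation, instantiating this config
# with its defaults yields an albert-xxlarge-v2-sized architecture.
#
#   from transformers import AlbertConfig, AlbertModel
#
#   config = AlbertConfig()  # 30000 vocab, 4096 hidden, 12 layers, 1 hidden group
#   model = AlbertModel(config)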
| 714 |
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class _UpperCamelCase ( lowerCAmelCase ):
# to overwrite at feature extractactor specific tests
UpperCAmelCase_ = None
UpperCAmelCase_ = None
@property
def UpperCAmelCase_ ( self :int ) -> int:
return self.feat_extract_tester.prepare_feat_extract_dict()
def UpperCAmelCase_ ( self :Any ) -> str:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase , "feature_size" ) )
self.assertTrue(hasattr(lowerCamelCase , "sampling_rate" ) )
self.assertTrue(hasattr(lowerCamelCase , "padding_value" ) )
def UpperCAmelCase_ ( self :str ) -> int:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(lowerCamelCase ) == len(lowerCamelCase ) for x, y in zip(lowerCamelCase , processed_features[input_name] ) ) )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="np" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def UpperCAmelCase_ ( self :Dict ) -> Union[str, Any]:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="pt" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def UpperCAmelCase_ ( self :Tuple ) -> Dict:
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(equal_length=lowerCamelCase )
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} , tensor_type="tf" )
UpperCAmelCase__ = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
UpperCAmelCase__ = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def UpperCAmelCase_ ( self :int , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Union[str, Any] ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Dict , lowerCamelCase :Optional[Any] ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = self.feat_extract_tester.seq_length_diff
UpperCAmelCase__ = self.feat_extract_tester.max_seq_length + pad_diff
UpperCAmelCase__ = self.feat_extract_tester.min_seq_length
UpperCAmelCase__ = self.feat_extract_tester.batch_size
UpperCAmelCase__ = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[-1] ) )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" )[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , pad_to_multiple_of=10 )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , pad_to_multiple_of=10 , max_length=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(all(len(lowerCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
UpperCAmelCase__ = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
self.assertTrue(all(len(lowerCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
UpperCAmelCase__ = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def UpperCAmelCase_ ( self :List[str] , lowerCamelCase :int=False ) -> str:
def _inputs_have_equal_length(lowerCamelCase :Any ):
UpperCAmelCase__ = len(input[0] )
for input_slice in input[1:]:
if len(lowerCamelCase ) != length:
return False
return True
def _inputs_are_equal(lowerCamelCase :Optional[int] , lowerCamelCase :str ):
if len(lowerCamelCase ) != len(lowerCamelCase ):
return False
for input_slice_a, input_slice_a in zip(lowerCamelCase , lowerCamelCase ):
if not np.allclose(np.asarray(lowerCamelCase ) , np.asarray(lowerCamelCase ) , atol=1e-3 ):
return False
return True
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common(numpify=lowerCamelCase )
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to smallest with np
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
# truncate to middle
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase , return_tensors="np" , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , truncation=lowerCamelCase )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[1] ) , return_tensors="np" )
UpperCAmelCase__ = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(_inputs_are_equal(lowerCamelCase , lowerCamelCase ) )
# since truncation forces padding to be smaller than longest input
# function can't return `np.ndarray`, but has to return list
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="longest" , truncation=lowerCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(lowerCamelCase ):
feat_extract.pad(lowerCamelCase , padding="max_length" , truncation=lowerCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
UpperCAmelCase__ = 12
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , truncation=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=lowerCamelCase , )
UpperCAmelCase__ = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
UpperCAmelCase__ = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
UpperCAmelCase__ = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(lowerCamelCase ) )
self.assertFalse(_inputs_have_equal_length(lowerCamelCase ) )
def UpperCAmelCase_ ( self :int ) -> List[str]:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :List[Any] ) -> int:
self._check_padding(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :str ) -> str:
self._check_truncation(numpify=lowerCamelCase )
def UpperCAmelCase_ ( self :Dict ) -> str:
self._check_truncation(numpify=lowerCamelCase )
@require_torch
def UpperCAmelCase_ ( self :int ) -> Any:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="pt" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def UpperCAmelCase_ ( self :List[Any] ) -> Optional[Any]:
UpperCAmelCase__ = self.feature_extraction_class(**self.feat_extract_dict )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )[input_name]
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="tf" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def UpperCAmelCase_ ( self :List[str] ) -> str:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = feat_extract.pad(lowerCamelCase , padding="longest" , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , lowerCamelCase )
def UpperCAmelCase_ ( self :int ) -> int:
UpperCAmelCase__ = self.feat_extract_dict
UpperCAmelCase__ = True
UpperCAmelCase__ = self.feature_extraction_class(**lowerCamelCase )
UpperCAmelCase__ = self.feat_extract_tester.prepare_inputs_for_common()
UpperCAmelCase__ = [len(lowerCamelCase ) for x in speech_inputs]
UpperCAmelCase__ = feat_extract.model_input_names[0]
UpperCAmelCase__ = BatchFeature({input_name: speech_inputs} )
UpperCAmelCase__ = min(lowerCamelCase )
UpperCAmelCase__ = feat_extract.pad(
lowerCamelCase , padding="max_length" , max_length=lowerCamelCase , truncation=lowerCamelCase , return_tensors="np" )
self.assertIn("attention_mask" , lowerCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
| 364 | 0 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
__lowerCAmelCase = logging.get_logger(__name__)
enable_full_determinism()
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Optional[Any] = UNetaDModel
_a : str = '''sample'''
@property
def _a ( self ):
"""simple docstring"""
a_ = 4
a_ = 3
a_ = (32, 32)
a_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
a_ = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def _a ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def _a ( self ):
"""simple docstring"""
return (3, 32, 32)
def _a ( self ):
"""simple docstring"""
a_ = {
'block_out_channels': (32, 64),
'down_block_types': ('DownBlock2D', 'AttnDownBlock2D'),
'up_block_types': ('AttnUpBlock2D', 'UpBlock2D'),
'attention_head_dim': 3,
'out_channels': 3,
'in_channels': 3,
'layers_per_block': 2,
'sample_size': 32,
}
a_ = self.dummy_input
return init_dict, inputs_dict
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Any = UNetaDModel
_a : Dict = '''sample'''
@property
def _a ( self ):
"""simple docstring"""
a_ = 4
a_ = 4
a_ = (32, 32)
a_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
a_ = torch.tensor([10] ).to(UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def _a ( self ):
"""simple docstring"""
return (4, 32, 32)
@property
def _a ( self ):
"""simple docstring"""
return (4, 32, 32)
def _a ( self ):
"""simple docstring"""
a_ = {
'sample_size': 32,
'in_channels': 4,
'out_channels': 4,
'layers_per_block': 2,
'block_out_channels': (32, 64),
'attention_head_dim': 32,
'down_block_types': ('DownBlock2D', 'DownBlock2D'),
'up_block_types': ('UpBlock2D', 'UpBlock2D'),
}
a_ = self.dummy_input
return init_dict, inputs_dict
def _a ( self ):
"""simple docstring"""
a_ , a_ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase__ )
a_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _a ( self ):
"""simple docstring"""
a_ , a_ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
model.to(UpperCamelCase__ )
a_ = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != 'cuda' , 'This test is supposed to run on GPU' )
def _a ( self ):
"""simple docstring"""
a_ , a_ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ )
model_accelerate.to(UpperCamelCase__ )
model_accelerate.eval()
a_ = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
a_ = noise.to(UpperCamelCase__ )
a_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
a_ = model_accelerate(UpperCamelCase__ , UpperCamelCase__ )['sample']
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
a_ , a_ = UNetaDModel.from_pretrained(
'fusing/unet-ldm-dummy-update' , output_loading_info=UpperCamelCase__ , low_cpu_mem_usage=UpperCamelCase__ )
model_normal_load.to(UpperCamelCase__ )
model_normal_load.eval()
a_ = model_normal_load(UpperCamelCase__ , UpperCamelCase__ )['sample']
assert torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 )
def _a ( self ):
"""simple docstring"""
a_ = UNetaDModel.from_pretrained('fusing/unet-ldm-dummy-update' )
model.eval()
model.to(UpperCamelCase__ )
a_ = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
a_ = noise.to(UpperCamelCase__ )
a_ = torch.tensor([10] * noise.shape[0] ).to(UpperCamelCase__ )
with torch.no_grad():
a_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
a_ = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
a_ = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-3 ) )
class __SCREAMING_SNAKE_CASE (__A , __A , unittest.TestCase ):
"""simple docstring"""
_a : Dict = UNetaDModel
_a : str = '''sample'''
@property
def _a ( self , UpperCamelCase__=(32, 32) ):
"""simple docstring"""
a_ = 4
a_ = 3
a_ = floats_tensor((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
a_ = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=UpperCamelCase__ )
return {"sample": noise, "timestep": time_step}
@property
def _a ( self ):
"""simple docstring"""
return (3, 32, 32)
@property
def _a ( self ):
"""simple docstring"""
return (3, 32, 32)
def _a ( self ):
"""simple docstring"""
a_ = {
'block_out_channels': [32, 64, 64, 64],
'in_channels': 3,
'layers_per_block': 1,
'out_channels': 3,
'time_embedding_type': 'fourier',
'norm_eps': 1e-6,
'mid_block_scale_factor': math.sqrt(2.0 ),
'norm_num_groups': None,
'down_block_types': [
'SkipDownBlock2D',
'AttnSkipDownBlock2D',
'SkipDownBlock2D',
'SkipDownBlock2D',
],
'up_block_types': [
'SkipUpBlock2D',
'SkipUpBlock2D',
'AttnSkipUpBlock2D',
'SkipUpBlock2D',
],
}
a_ = self.dummy_input
return init_dict, inputs_dict
@slow
def _a ( self ):
"""simple docstring"""
a_ , a_ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' , output_loading_info=UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
self.assertEqual(len(loading_info['missing_keys'] ) , 0 )
model.to(UpperCamelCase__ )
a_ = self.dummy_input
a_ = floats_tensor((4, 3) + (256, 256) ).to(UpperCamelCase__ )
a_ = noise
a_ = model(**UpperCamelCase__ )
assert image is not None, "Make sure output is not None"
@slow
def _a ( self ):
"""simple docstring"""
a_ = UNetaDModel.from_pretrained('google/ncsnpp-celebahq-256' )
model.to(UpperCamelCase__ )
a_ = 4
a_ = 3
a_ = (256, 256)
a_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
a_ = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
a_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
a_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a_ = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def _a ( self ):
"""simple docstring"""
a_ = UNetaDModel.from_pretrained('fusing/ncsnpp-ffhq-ve-dummy-update' )
model.to(UpperCamelCase__ )
a_ = 4
a_ = 3
a_ = (32, 32)
a_ = torch.ones((batch_size, num_channels) + sizes ).to(UpperCamelCase__ )
a_ = torch.tensor(batch_size * [1e-4] ).to(UpperCamelCase__ )
with torch.no_grad():
a_ = model(UpperCamelCase__ , UpperCamelCase__ ).sample
a_ = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
a_ = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256] )
# fmt: on
self.assertTrue(torch_all_close(UpperCamelCase__ , UpperCamelCase__ , rtol=1e-2 ) )
def _a ( self ):
"""simple docstring"""
pass
| 536 |
'''simple docstring'''
def solution(limit: int = 1_000_000) -> int:
    """
    Project Euler 72: count the reduced proper fractions with denominator
    <= limit, i.e. the sum of Euler's totient phi(n) for 2 <= n <= limit.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))


if __name__ == "__main__":
    print(f"""{solution() = }""")
| 536 | 1 |
import argparse
import hashlib  # hashlib is only used inside the test function below
import struct
class SHA1Hash:
    '''Class to contain the entire pipeline for SHA-1 hashing a bytestring.'''

    def __init__(self, data):
        self.data = data
        self.h = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]

    @staticmethod
    def rotate(n, b):
        # Left-rotate the 32-bit integer n by b bits
        return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF

    def padding(self):
        padding = b"\x80" + b"\x00" * (63 - (len(self.data) + 8) % 64)
        padded_data = self.data + padding + struct.pack(">Q", 8 * len(self.data))
        return padded_data

    def split_blocks(self):
        return [
            self.padded_data[i : i + 64] for i in range(0, len(self.padded_data), 64)
        ]

    def expand_block(self, block):
        w = list(struct.unpack(">16L", block)) + [0] * 64
        for i in range(16, 80):
            w[i] = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]), 1)
        return w

    def final_hash(self):
        self.padded_data = self.padding()
        self.blocks = self.split_blocks()
        for block in self.blocks:
            expanded_block = self.expand_block(block)
            a, b, c, d, e = self.h
            for i in range(0, 80):
                if 0 <= i < 20:
                    f = (b & c) | ((~b) & d)
                    k = 0x5A82_7999
                elif 20 <= i < 40:
                    f = b ^ c ^ d
                    k = 0x6ED9_EBA1
                elif 40 <= i < 60:
                    f = (b & c) | (b & d) | (c & d)
                    k = 0x8F1B_BCDC
                elif 60 <= i < 80:
                    f = b ^ c ^ d
                    k = 0xCA62_C1D6
                a, b, c, d, e = (
                    self.rotate(a, 5) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
                    a,
                    self.rotate(b, 30),
                    c,
                    d,
                )
            self.h = (
                self.h[0] + a & 0xFFFF_FFFF,
                self.h[1] + b & 0xFFFF_FFFF,
                self.h[2] + c & 0xFFFF_FFFF,
                self.h[3] + d & 0xFFFF_FFFF,
                self.h[4] + e & 0xFFFF_FFFF,
            )
        return ("{:08x}" * 5).format(*self.h)


def test_sha1_hash() -> None:
    msg = b"Test String"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324


def main() -> None:
    parser = argparse.ArgumentParser(description="Process some strings or files")
    parser.add_argument(
        "--string", dest="input_string", default="Hello World!! Welcome to Cryptography", help="Hash the string", )
    parser.add_argument("--file", dest="input_file", help="Hash contents of a file")
    args = parser.parse_args()
    input_string = args.input_string
    # In any case hash input should be a bytestring
    if args.input_file:
        with open(args.input_file, "rb") as f:
            hash_input = f.read()
    else:
        hash_input = bytes(input_string, "utf-8")
    print(SHA1Hash(hash_input).final_hash())


if __name__ == "__main__":
    main()
    import doctest
    doctest.testmod()
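
# Usage sketch: the digest matches hashlib's for arbitrary byte strings.
def _sha1_demo() -> None:
    msg = b"The quick brown fox jumps over the lazy dog"
    assert SHA1Hash(msg).final_hash() == hashlib.sha1(msg).hexdigest()  # noqa: S324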
| 23 |
def factorial(num: int) -> int:
    """Find the factorial of num."""
    fact = 1
    for i in range(1, num + 1):
        fact *= i
    return fact


def split_and_add(number: int) -> int:
    """Split number digits and add them."""
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num: int = 100) -> int:
    """Project Euler 20: return the sum of the digits of num!."""
    nfact = factorial(num)
    result = split_and_add(nfact)
    return result


if __name__ == "__main__":
    print(solution(int(input("Enter the Number: ").strip())))
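
# Worked example: 10! = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
def _digit_sum_demo() -> None:
    assert solution(10) == 27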
| 23 | 1 |
import logging
from transformers.configuration_utils import PretrainedConfig
__a = logging.getLogger(__name__)
class __SCREAMING_SNAKE_CASE ( A__ ):
A : Union[str, Any] = 'masked_bert'
def __init__( self , SCREAMING_SNAKE_CASE__=30522 , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__=0.02 , SCREAMING_SNAKE_CASE__=1E-12 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__="topK" , SCREAMING_SNAKE_CASE__="constant" , SCREAMING_SNAKE_CASE__=0.0 , **SCREAMING_SNAKE_CASE__ , ):
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
lowercase : Dict = vocab_size
lowercase : Dict = hidden_size
lowercase : List[str] = num_hidden_layers
lowercase : Union[str, Any] = num_attention_heads
lowercase : str = hidden_act
lowercase : Tuple = intermediate_size
lowercase : int = hidden_dropout_prob
lowercase : str = attention_probs_dropout_prob
lowercase : int = max_position_embeddings
lowercase : Union[str, Any] = type_vocab_size
lowercase : Optional[Any] = initializer_range
lowercase : str = layer_norm_eps
lowercase : int = pruning_method
lowercase : List[Any] = mask_init
lowercase : Dict = mask_scale
| 319 |
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class __SCREAMING_SNAKE_CASE ( A__ , A__ , unittest.TestCase ):
A : List[str] = IFInpaintingPipeline
A : List[str] = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {'width', 'height'}
A : Tuple = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A : Optional[Any] = PipelineTesterMixin.required_optional_params - {'latents'}
def __lowerCamelCase ( self ):
return self._get_dummy_components()
def __lowerCamelCase ( self , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=0 ):
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
lowercase : Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
lowercase : Dict = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowercase : int = floats_tensor((1, 3, 32, 32) , rng=random.Random(SCREAMING_SNAKE_CASE__ ) ).to(SCREAMING_SNAKE_CASE__ )
lowercase : Tuple = {
'''prompt''': '''A painting of a squirrel eating a burger''',
'''image''': image,
'''mask_image''': mask_image,
'''generator''': generator,
'''num_inference_steps''': 2,
'''output_type''': '''numpy''',
}
return inputs
@unittest.skipIf(
torch_device != '''cuda''' or not is_xformers_available() , reason='''XFormers attention is only available with CUDA and `xformers` installed''' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCamelCase ( self ):
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != '''cuda''' , reason='''float16 requires CUDA''' )
def __lowerCamelCase ( self ):
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCamelCase ( self ):
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCamelCase ( self ):
self._test_save_load_local()
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
| 319 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_biogpt"] = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
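
# What the _LazyModule indirection buys, as a hedged sketch: `import transformers`
# stays cheap because heavy submodules load only when first accessed, e.g.
#
#   from transformers import BioGptForCausalLM  # resolved lazily on attribute access
#
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")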
| 700 |
def abbr(a: str, b: str) -> bool:
    '''
    Return True if `a` can be turned into `b` by upper-casing some of its
    lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    '''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
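
# Reading the table for the doctest above: dp[i][j] is True when the first i
# characters of `a` can produce the first j characters of `b`; for "daBcd" ->
# "ABC" the path is drop 'd', capitalize 'a', match 'B', capitalize 'c', drop 'd'.
def _abbr_demo() -> None:
    assert abbr("daBcd", "ABC")
    assert not abbr("dBcd", "ABC")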
| 86 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import ScoreSdeVePipeline, ScoreSdeVeScheduler, UNet2DModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class ScoreSdeVePipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self) -> UNet2DModel:
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=3,
            out_channels=3,
            down_block_types=("DownBlock2D", "AttnDownBlock2D"),
            up_block_types=("AttnUpBlock2D", "UpBlock2D"),
        )
        return model

    def test_inference(self):
        unet = self.dummy_uncond_unet
        scheduler = ScoreSdeVeScheduler()
        sde_ve = ScoreSdeVePipeline(unet=unet, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator).images
        generator = torch.manual_seed(0)
        image_from_tuple = sde_ve(num_inference_steps=2, output_type="numpy", generator=generator, return_dict=False)[
            0
        ]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 32, 32, 3)
        expected_slice = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2


@slow
@require_torch
class ScoreSdeVePipelineIntegrationTests(unittest.TestCase):
    def test_inference(self):
        model_id = "google/ncsnpp-church-256"
        model = UNet2DModel.from_pretrained(model_id)
        scheduler = ScoreSdeVeScheduler.from_pretrained(model_id)
        sde_ve = ScoreSdeVePipeline(unet=model, scheduler=scheduler)
        sde_ve.to(torch_device)
        sde_ve.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = sde_ve(num_inference_steps=10, output_type="numpy", generator=generator).images

        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 0.0, 0.0])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
| 532 |
"""simple docstring"""
import io
import json
import unittest
from parameterized import parameterized
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
from transformers.testing_utils import get_tests_dir, require_torch, slow, torch_device
from utils import calculate_bleu
filename = get_tests_dir() + "/test_data/fsmt/fsmt_val_data.json"
with io.open(filename, "r", encoding="utf-8") as f:
    bleu_data = json.load(f)
@require_torch
class FSMTModelIntegrationTests(unittest.TestCase):
    def get_tokenizer(self, mname) -> FSMTTokenizer:
        return FSMTTokenizer.from_pretrained(mname)

    def get_model(self, mname) -> FSMTForConditionalGeneration:
        model = FSMTForConditionalGeneration.from_pretrained(mname).to(torch_device)
        if torch_device == "cuda":
            model.half()
        return model

    @parameterized.expand(
        [
            ["en-ru", 26.0],
            ["ru-en", 22.0],
            ["en-de", 22.0],
            ["de-en", 29.0],
        ]
    )
    @slow
    def test_bleu_scores(self, pair, min_bleu_score):
        # note: this test is not testing the best performance since it only evals a small batch
        # but it should be enough to detect a regression in the output quality
        mname = f"facebook/wmt19-{pair}"
        tokenizer = self.get_tokenizer(mname)
        model = self.get_model(mname)

        src_sentences = bleu_data[pair]["src"]
        tgt_sentences = bleu_data[pair]["tgt"]

        batch = tokenizer(src_sentences, return_tensors="pt", truncation=True, padding="longest").to(torch_device)
        outputs = model.generate(
            input_ids=batch.input_ids,
            num_beams=8,
        )
        decoded_sentences = tokenizer.batch_decode(
            outputs, skip_special_tokens=True, clean_up_tokenization_spaces=False
        )
        scores = calculate_bleu(decoded_sentences, tgt_sentences)
        print(scores)
        self.assertGreaterEqual(scores["bleu"], min_bleu_score)
| 532 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model",
    },
    "tokenizer_file": {
        "xlnet-base-cased": "https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json",
        "xlnet-large-cased": "https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "xlnet-base-cased": None,
    "xlnet-large-cased": None,
}

SPIECE_UNDERLINE = "▁"

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = "left"
    slow_tokenizer_class = XLNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file=vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            **kwargs,
        )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id

    def save_vocabulary(self, save_directory, filename_prefix=None):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
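# Minimal usage sketch (added for illustration; assumes the public
# "xlnet-base-cased" checkpoint is reachable):
#
#     tokenizer = XLNetTokenizerFast.from_pretrained("xlnet-base-cased")
#     ids = tokenizer("Hello world")["input_ids"]
#
# Unlike BERT-style tokenizers, the special tokens land at the *end* of the
# sequence (`... <sep> <cls>`), matching build_inputs_with_special_tokens above,
# and padding is applied on the left.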
| 706 |
"""simple docstring"""
import time
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers.generation import (
MaxLengthCriteria,
MaxNewTokensCriteria,
MaxTimeCriteria,
StoppingCriteriaList,
validate_stopping_criteria,
)
@require_torch
class StoppingCriteriaTestCase(unittest.TestCase):
    def _get_tensors(self, length):
        batch_size = 3
        vocab_size = 250
        input_ids = ids_tensor((batch_size, length), vocab_size)
        scores = torch.ones((batch_size, length), device=torch_device, dtype=torch.float) / length
        return input_ids, scores

    def test_list_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = StoppingCriteriaList(
            [
                MaxLengthCriteria(max_length=10),
                MaxTimeCriteria(max_time=0.1),
            ]
        )
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_length_criteria(self):
        criteria = MaxLengthCriteria(max_length=10)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))

    def test_max_new_tokens_criteria(self):
        criteria = MaxNewTokensCriteria(start_length=5, max_new_tokens=5)
        input_ids, scores = self._get_tensors(5)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(9)
        self.assertFalse(criteria(input_ids, scores))
        input_ids, scores = self._get_tensors(10)
        self.assertTrue(criteria(input_ids, scores))
        criteria_list = StoppingCriteriaList([criteria])
        self.assertEqual(criteria_list.max_length, 10)

    def test_max_time_criteria(self):
        input_ids, scores = self._get_tensors(5)
        criteria = MaxTimeCriteria(max_time=0.1)
        self.assertFalse(criteria(input_ids, scores))
        criteria = MaxTimeCriteria(max_time=0.1, initial_timestamp=time.time() - 0.2)
        self.assertTrue(criteria(input_ids, scores))

    def test_validate_stopping_criteria(self):
        validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 10)
        with self.assertWarns(UserWarning):
            validate_stopping_criteria(StoppingCriteriaList([MaxLengthCriteria(10)]), 11)
        stopping_criteria = validate_stopping_criteria(StoppingCriteriaList(), 11)
        self.assertEqual(len(stopping_criteria), 1)
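# Minimal usage sketch (added for illustration): the criteria exercised above
# are consumed by generation through the `stopping_criteria` argument of
# `model.generate`, e.g.
#
#     criteria = StoppingCriteriaList([MaxTimeCriteria(max_time=5.0)])
#     outputs = model.generate(input_ids, stopping_criteria=criteria)
#
# where `model` and `input_ids` are assumed to be an existing generation model
# and a tokenized prompt.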
| 637 | 0 |
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/vocab.txt''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/vocab.txt''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/vocab.txt''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/vocab.txt''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/vocab.txt'''
),
'''bert-base-multilingual-cased''': '''https://huggingface.co/bert-base-multilingual-cased/resolve/main/vocab.txt''',
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/vocab.txt''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/vocab.txt''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/vocab.txt'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/vocab.txt'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/vocab.txt'''
),
'''bert-base-german-dbmdz-cased''': '''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/vocab.txt''',
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/vocab.txt'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/vocab.txt'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/vocab.txt'''
),
},
'''tokenizer_file''': {
'''bert-base-uncased''': '''https://huggingface.co/bert-base-uncased/resolve/main/tokenizer.json''',
'''bert-large-uncased''': '''https://huggingface.co/bert-large-uncased/resolve/main/tokenizer.json''',
'''bert-base-cased''': '''https://huggingface.co/bert-base-cased/resolve/main/tokenizer.json''',
'''bert-large-cased''': '''https://huggingface.co/bert-large-cased/resolve/main/tokenizer.json''',
'''bert-base-multilingual-uncased''': (
'''https://huggingface.co/bert-base-multilingual-uncased/resolve/main/tokenizer.json'''
),
'''bert-base-multilingual-cased''': (
'''https://huggingface.co/bert-base-multilingual-cased/resolve/main/tokenizer.json'''
),
'''bert-base-chinese''': '''https://huggingface.co/bert-base-chinese/resolve/main/tokenizer.json''',
'''bert-base-german-cased''': '''https://huggingface.co/bert-base-german-cased/resolve/main/tokenizer.json''',
'''bert-large-uncased-whole-word-masking''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking/resolve/main/tokenizer.json'''
),
'''bert-large-uncased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-uncased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-large-cased-whole-word-masking-finetuned-squad''': (
'''https://huggingface.co/bert-large-cased-whole-word-masking-finetuned-squad/resolve/main/tokenizer.json'''
),
'''bert-base-cased-finetuned-mrpc''': (
'''https://huggingface.co/bert-base-cased-finetuned-mrpc/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-cased''': (
'''https://huggingface.co/bert-base-german-dbmdz-cased/resolve/main/tokenizer.json'''
),
'''bert-base-german-dbmdz-uncased''': (
'''https://huggingface.co/bert-base-german-dbmdz-uncased/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-cased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-cased-v1/resolve/main/tokenizer.json'''
),
'''TurkuNLP/bert-base-finnish-uncased-v1''': (
'''https://huggingface.co/TurkuNLP/bert-base-finnish-uncased-v1/resolve/main/tokenizer.json'''
),
'''wietsedv/bert-base-dutch-cased''': (
'''https://huggingface.co/wietsedv/bert-base-dutch-cased/resolve/main/tokenizer.json'''
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''bert-base-uncased''': 512,
'''bert-large-uncased''': 512,
'''bert-base-cased''': 512,
'''bert-large-cased''': 512,
'''bert-base-multilingual-uncased''': 512,
'''bert-base-multilingual-cased''': 512,
'''bert-base-chinese''': 512,
'''bert-base-german-cased''': 512,
'''bert-large-uncased-whole-word-masking''': 512,
'''bert-large-cased-whole-word-masking''': 512,
'''bert-large-uncased-whole-word-masking-finetuned-squad''': 512,
'''bert-large-cased-whole-word-masking-finetuned-squad''': 512,
'''bert-base-cased-finetuned-mrpc''': 512,
'''bert-base-german-dbmdz-cased''': 512,
'''bert-base-german-dbmdz-uncased''': 512,
'''TurkuNLP/bert-base-finnish-cased-v1''': 512,
'''TurkuNLP/bert-base-finnish-uncased-v1''': 512,
'''wietsedv/bert-base-dutch-cased''': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'''bert-base-uncased''': {'''do_lower_case''': True},
'''bert-large-uncased''': {'''do_lower_case''': True},
'''bert-base-cased''': {'''do_lower_case''': False},
'''bert-large-cased''': {'''do_lower_case''': False},
'''bert-base-multilingual-uncased''': {'''do_lower_case''': True},
'''bert-base-multilingual-cased''': {'''do_lower_case''': False},
'''bert-base-chinese''': {'''do_lower_case''': False},
'''bert-base-german-cased''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking''': {'''do_lower_case''': False},
'''bert-large-uncased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': True},
'''bert-large-cased-whole-word-masking-finetuned-squad''': {'''do_lower_case''': False},
'''bert-base-cased-finetuned-mrpc''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-cased''': {'''do_lower_case''': False},
'''bert-base-german-dbmdz-uncased''': {'''do_lower_case''': True},
'''TurkuNLP/bert-base-finnish-cased-v1''': {'''do_lower_case''': False},
'''TurkuNLP/bert-base-finnish-uncased-v1''': {'''do_lower_case''': True},
'''wietsedv/bert-base-dutch-cased''': {'''do_lower_case''': False},
}
class BertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = BertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
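# Minimal usage sketch (added for illustration; assumes the public
# "bert-base-uncased" checkpoint is reachable):
#
#     tokenizer = BertTokenizerFast.from_pretrained("bert-base-uncased")
#     encoding = tokenizer("hello world")
#     # encoding.input_ids starts with [CLS] and ends with [SEP], as built above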
| 549 |
"""simple docstring"""
def is_unique_chars(input_str: str) -> bool:
    """
    Determine whether a string contains only unique characters, using an
    integer as a bitmap over Unicode code points.

    >>> is_unique_chars("abc")
    True
    >>> is_unique_chars("aab")
    False
    """
    bitmap = 0
    for ch in input_str:
        ch_unicode = ord(ch)
        ch_bit_index_on = pow(2, ch_unicode)
        # If we already turned on the bit for the current character's code
        # point, the character was seen before.
        if bitmap >> ch_unicode & 1 == 1:
            return False
        bitmap |= ch_bit_index_on
    return True
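# A set-based variant for comparison. This is an added sketch, not part of the
# original module: it checks the same property without the bitmap trick, at the
# cost of a per-character hash lookup instead of integer bit operations.
def is_unique_chars_set(input_str: str) -> bool:
    seen: set[str] = set()
    for ch in input_str:
        if ch in seen:  # character already encountered
            return False
        seen.add(ch)
    return True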
if __name__ == "__main__":
import doctest
doctest.testmod()
| 549 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ALTCLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
    # See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class AltCLIPTextConfig(PretrainedConfig):
    model_type = "altclip_text_model"

    def __init__(
        self,
        vocab_size=250002,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=514,
        type_vocab_size=1,
        initializer_range=0.02,
        initializer_factor=0.02,
        layer_norm_eps=1e-05,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        project_dim=768,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.project_dim = project_dim


class AltCLIPVisionConfig(PretrainedConfig):
    model_type = "altclip_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        projection_dim=512,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=224,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.projection_dim = projection_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from AltCLIPConfig
        if config_dict.get("model_type") == "altclip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)


class AltCLIPConfig(PretrainedConfig):
    model_type = "altclip"
    is_composition = True

    def __init__(self, text_config=None, vision_config=None, projection_dim=768, logit_scale_init_value=2.6592, **kwargs):
        # If `_config_dict` exist, we use them for the backward compatibility.
        # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
        # of confusion!).
        text_config_dict = kwargs.pop("text_config_dict", None)
        vision_config_dict = kwargs.pop("vision_config_dict", None)
        super().__init__(**kwargs)

        # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
        # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
        if text_config_dict is not None:
            if text_config is None:
                text_config = {}
            # This is the complete result when using `text_config_dict`.
            _text_config_dict = AltCLIPTextConfig(**text_config_dict).to_dict()
            # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
            for key, value in _text_config_dict.items():
                if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
                    # If specified in `text_config_dict`
                    if key in text_config_dict:
                        message = (
                            f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
                            f'The value `text_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            "`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The "
                            f'value `text_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `text_config` with the ones in `_text_config_dict`.
            text_config.update(_text_config_dict)

        if vision_config_dict is not None:
            if vision_config is None:
                vision_config = {}
            # This is the complete result when using `vision_config_dict`.
            _vision_config_dict = AltCLIPVisionConfig(**vision_config_dict).to_dict()
            # convert keys to string instead of integer
            if "id2label" in _vision_config_dict:
                _vision_config_dict["id2label"] = {
                    str(key): value for key, value in _vision_config_dict["id2label"].items()
                }
            # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
            for key, value in _vision_config_dict.items():
                if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
                    # If specified in `vision_config_dict`
                    if key in vision_config_dict:
                        message = (
                            f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
                            f'values. The value `vision_config_dict["{key}"]` will be used instead.'
                        )
                    # If inferred from default argument values (just to be super careful)
                    else:
                        message = (
                            "`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. "
                            f'The value `vision_config["{key}"]` will be overriden.'
                        )
                    logger.warning(message)
            # Update all values in `vision_config` with the ones in `_vision_config_dict`.
            vision_config.update(_vision_config_dict)

        if text_config is None:
            text_config = {}
            logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.")

        self.text_config = AltCLIPTextConfig(**text_config)
        self.vision_config = AltCLIPVisionConfig(**vision_config)
        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.initializer_factor = 1.0

    @classmethod
    def from_text_vision_configs(cls, text_config: AltCLIPTextConfig, vision_config: AltCLIPVisionConfig, **kwargs):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
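# Minimal usage sketch (added for illustration): a composite config can be
# assembled from its two sub-configs via the classmethod above.
#
#     text_config = AltCLIPTextConfig()
#     vision_config = AltCLIPVisionConfig()
#     config = AltCLIPConfig.from_text_vision_configs(text_config, vision_config)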
| 710 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""BAAI/AltCLIP""": """https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json""",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Union[str, Any] = "altclip_text_model"
def __init__( self , SCREAMING_SNAKE_CASE__=250002 , SCREAMING_SNAKE_CASE__=1024 , SCREAMING_SNAKE_CASE__=24 , SCREAMING_SNAKE_CASE__=16 , SCREAMING_SNAKE_CASE__=4096 , SCREAMING_SNAKE_CASE__="gelu" , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=0.1 , SCREAMING_SNAKE_CASE__=514 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1e-05 , SCREAMING_SNAKE_CASE__=1 , SCREAMING_SNAKE_CASE__=0 , SCREAMING_SNAKE_CASE__=2 , SCREAMING_SNAKE_CASE__="absolute" , SCREAMING_SNAKE_CASE__=True , SCREAMING_SNAKE_CASE__=768 , **SCREAMING_SNAKE_CASE__ , ) -> Dict:
super().__init__(pad_token_id=SCREAMING_SNAKE_CASE__ , bos_token_id=SCREAMING_SNAKE_CASE__ , eos_token_id=SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
A__ = vocab_size
A__ = hidden_size
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = hidden_act
A__ = intermediate_size
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = max_position_embeddings
A__ = type_vocab_size
A__ = initializer_range
A__ = initializer_factor
A__ = layer_norm_eps
A__ = position_embedding_type
A__ = use_cache
A__ = project_dim
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : List[Any] = "altclip_vision_model"
def __init__( self , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=3072 , SCREAMING_SNAKE_CASE__=512 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=12 , SCREAMING_SNAKE_CASE__=3 , SCREAMING_SNAKE_CASE__=224 , SCREAMING_SNAKE_CASE__=32 , SCREAMING_SNAKE_CASE__="quick_gelu" , SCREAMING_SNAKE_CASE__=1e-5 , SCREAMING_SNAKE_CASE__=0.0 , SCREAMING_SNAKE_CASE__=0.0_2 , SCREAMING_SNAKE_CASE__=1.0 , **SCREAMING_SNAKE_CASE__ , ) -> List[Any]:
super().__init__(**SCREAMING_SNAKE_CASE__ )
A__ = hidden_size
A__ = intermediate_size
A__ = projection_dim
A__ = num_hidden_layers
A__ = num_attention_heads
A__ = num_channels
A__ = patch_size
A__ = image_size
A__ = initializer_range
A__ = initializer_factor
A__ = attention_dropout
A__ = layer_norm_eps
A__ = hidden_act
@classmethod
def snake_case__ ( cls , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> "PretrainedConfig":
cls._set_token_in_kwargs(SCREAMING_SNAKE_CASE__ )
A__ , A__ = cls.get_config_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("model_type" ) == "altclip":
A__ = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict['model_type']} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
class UpperCamelCase__ ( _lowerCAmelCase ):
"""simple docstring"""
A__ : Any = "altclip"
A__ : str = True
def __init__( self , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=None , SCREAMING_SNAKE_CASE__=768 , SCREAMING_SNAKE_CASE__=2.6_5_9_2 , **SCREAMING_SNAKE_CASE__ ) -> Any:
# If `_config_dict` exist, we use them for the backward compatibility.
# We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
# of confusion!).
A__ = kwargs.pop("text_config_dict" , SCREAMING_SNAKE_CASE__ )
A__ = kwargs.pop("vision_config_dict" , SCREAMING_SNAKE_CASE__ )
super().__init__(**SCREAMING_SNAKE_CASE__ )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
# `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
A__ = {}
# This is the complete result when using `text_config_dict`.
A__ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE__ ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
A__ = (
f"""`{key}` is found in both `text_config_dict` and `text_config` but with different values. """
f"""The value `text_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
f"""`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The """
f"""value `text_config[\"{key}\"]` will be overriden."""
)
logger.warning(SCREAMING_SNAKE_CASE__ )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
A__ = {}
# This is the complete result when using `vision_config_dict`.
A__ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE__ ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
A__ = {
str(SCREAMING_SNAKE_CASE__ ): value for key, value in _vision_config_dict["id2label"].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
A__ = (
f"""`{key}` is found in both `vision_config_dict` and `vision_config` but with different """
f"""values. The value `vision_config_dict[\"{key}\"]` will be used instead."""
)
# If inferred from default argument values (just to be super careful)
else:
A__ = (
f"""`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. """
f"""The value `vision_config[\"{key}\"]` will be overriden."""
)
logger.warning(SCREAMING_SNAKE_CASE__ )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
A__ = {}
logger.info("`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values." )
if vision_config is None:
A__ = {}
logger.info("`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values." )
A__ = AltCLIPTextConfig(**SCREAMING_SNAKE_CASE__ )
A__ = AltCLIPVisionConfig(**SCREAMING_SNAKE_CASE__ )
A__ = projection_dim
A__ = logit_scale_init_value
A__ = 1.0
@classmethod
def snake_case__ ( cls , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ ) -> Dict:
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **SCREAMING_SNAKE_CASE__ )
def snake_case__ ( self ) -> Union[str, Any]:
A__ = copy.deepcopy(self.__dict__ )
A__ = self.text_config.to_dict()
A__ = self.vision_config.to_dict()
A__ = self.__class__.model_type
return output
| 562 | 0 |
"""simple docstring"""
import unittest
from diffusers.pipelines.pipeline_utils import is_safetensors_compatible
class IsSafetensorsCompatibleTests(unittest.TestCase):
    def test_all_is_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_compatible(self):
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_diffusers_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
            "unet/diffusion_pytorch_model.bin",
            # Removed: 'unet/diffusion_pytorch_model.safetensors',
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_transformer_model_is_compatible(self):
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        self.assertTrue(is_safetensors_compatible(filenames))

    def test_transformer_model_is_not_compatible(self):
        filenames = [
            "safety_checker/pytorch_model.bin",
            "safety_checker/model.safetensors",
            "vae/diffusion_pytorch_model.bin",
            "vae/diffusion_pytorch_model.safetensors",
            "text_encoder/pytorch_model.bin",
            # Removed: 'text_encoder/model.safetensors',
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        self.assertFalse(is_safetensors_compatible(filenames))

    def test_all_is_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant(self):
        filenames = [
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "unet/diffusion_pytorch_model.bin",
            "unet/diffusion_pytorch_model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_diffusers_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
            "unet/diffusion_pytorch_model.fp16.bin",
            # Removed: 'unet/diffusion_pytorch_model.fp16.safetensors',
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant(self):
        filenames = [
            "text_encoder/pytorch_model.fp16.bin",
            "text_encoder/model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_compatible_variant_partial(self):
        # pass variant but use the non-variant filenames
        filenames = [
            "text_encoder/pytorch_model.bin",
            "text_encoder/model.safetensors",
        ]
        variant = "fp16"
        self.assertTrue(is_safetensors_compatible(filenames, variant=variant))

    def test_transformer_model_is_not_compatible_variant(self):
        filenames = [
            "safety_checker/pytorch_model.fp16.bin",
            "safety_checker/model.fp16.safetensors",
            "vae/diffusion_pytorch_model.fp16.bin",
            "vae/diffusion_pytorch_model.fp16.safetensors",
            "text_encoder/pytorch_model.fp16.bin",
            # 'text_encoder/model.fp16.safetensors',
            "unet/diffusion_pytorch_model.fp16.bin",
            "unet/diffusion_pytorch_model.fp16.safetensors",
        ]
        variant = "fp16"
        self.assertFalse(is_safetensors_compatible(filenames, variant=variant))
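# Added note: the pattern exercised above is that `is_safetensors_compatible`
# returns True only when every PyTorch `.bin` weight file (optionally with a
# `.fp16` variant infix) has a matching `.safetensors` counterpart, so the
# checkpoint can be loaded without deserializing pickled weights.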
| 359 | """simple docstring"""
from __future__ import annotations

from math import pow, sqrt


def validate(*values: float) -> bool:
    """Return True only if every provided value is a positive float."""
    result = len(values) > 0 and all(value > 0.0 for value in values)
    return result


# The functions below follow the Graham's law convention
# rate_1 / rate_2 = sqrt(molar_mass_2 / molar_mass_1); the exact 1/2 argument
# ordering was lost in the source and is restored per that convention.
def effusion_ratio(molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass values must be greater than 0.")
    )


def first_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate * sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_effusion_rate(effusion_rate: float, molar_mass_1: float, molar_mass_2: float) -> float | ValueError:
    return (
        round(effusion_rate / sqrt(molar_mass_2 / molar_mass_1), 6)
        if validate(effusion_rate, molar_mass_1, molar_mass_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def first_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(molar_mass / pow(effusion_rate_1 / effusion_rate_2, 2), 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )


def second_molar_mass(molar_mass: float, effusion_rate_1: float, effusion_rate_2: float) -> float | ValueError:
    return (
        round(pow(effusion_rate_1 / effusion_rate_2, 2) / molar_mass, 6)
        if validate(molar_mass, effusion_rate_1, effusion_rate_2)
        else ValueError("Input Error: Molar mass and effusion rate values must be greater than 0.")
    )
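# Worked example (added note), using H2 (~2.016 g/mol) and O2 (~31.998 g/mol):
# effusion_ratio(2.016, 31.998) evaluates sqrt(31.998 / 2.016) ~= 3.984, i.e.
# hydrogen effuses roughly four times faster than oxygen, as Graham's law predicts.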
| 359 | 1 |
import os
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers.models.realm.configuration_realm import RealmConfig
from transformers.models.realm.retrieval_realm import _REALM_BLOCK_RECORDS_FILENAME, RealmRetriever
from transformers.models.realm.tokenization_realm import VOCAB_FILES_NAMES, RealmTokenizer
class RealmRetrieverTest(TestCase):
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.num_block_records = 5

        # Realm tok
        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "test",
            "question",
            "this",
            "is",
            "the",
            "first",
            "second",
            "third",
            "fourth",
            "fifth",
            "record",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        realm_tokenizer_path = os.path.join(self.tmpdirname, "realm_tokenizer")
        os.makedirs(realm_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(realm_tokenizer_path, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

        realm_block_records_path = os.path.join(self.tmpdirname, "realm_block_records")
        os.makedirs(realm_block_records_path, exist_ok=True)

    def get_tokenizer(self) -> RealmTokenizer:
        return RealmTokenizer.from_pretrained(os.path.join(self.tmpdirname, "realm_tokenizer"))

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def get_config(self):
        config = RealmConfig(num_block_records=self.num_block_records)
        return config

    def get_dummy_dataset(self):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "question": ["foo", "bar"],
                "answers": [["Foo", "Bar"], ["Bar"]],
            }
        )
        return dataset

    def get_dummy_block_records(self):
        block_records = np.array(
            [
                b"This is the first record",
                b"This is the second record",
                b"This is the third record",
                b"This is the fourth record",
                b"This is the fifth record",
                b"This is a longer longer longer record",
            ],
            dtype=object,
        )
        return block_records

    def get_dummy_retriever(self):
        retriever = RealmRetriever(
            block_records=self.get_dummy_block_records(),
            tokenizer=self.get_tokenizer(),
        )
        return retriever

    def test_retrieve(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, concat_inputs = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual(len(has_answers), 2)
        self.assertEqual(len(start_pos), 2)
        self.assertEqual(len(end_pos), 2)
        self.assertEqual(concat_inputs.input_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.attention_mask.shape, (2, 10))
        self.assertEqual(concat_inputs.token_type_ids.shape, (2, 10))
        self.assertEqual(concat_inputs.special_tokens_mask.shape, (2, 10))
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[0]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "first", "record", "[SEP]"],
        )
        self.assertEqual(
            tokenizer.convert_ids_to_tokens(concat_inputs.input_ids[1]),
            ["[CLS]", "test", "question", "[SEP]", "this", "is", "the", "fourth", "record", "[SEP]"],
        )

    def test_block_has_answer(self):
        config = self.get_config()
        retriever = self.get_dummy_retriever()
        tokenizer = retriever.tokenizer

        retrieved_block_ids = np.array([0, 3, 5], dtype="long")
        question_input_ids = tokenizer(["Test question"]).input_ids
        answer_ids = tokenizer(
            ["the fourth", "longer longer"],
            add_special_tokens=False,
            return_token_type_ids=False,
            return_attention_mask=False,
        ).input_ids
        max_length = config.reader_seq_len

        has_answers, start_pos, end_pos, _ = retriever(
            retrieved_block_ids, question_input_ids, answer_ids=answer_ids, max_length=max_length, return_tensors="np"
        )

        self.assertEqual([False, True, True], has_answers)
        self.assertEqual([[-1, -1, -1], [6, -1, -1], [6, 7, 8]], start_pos)
        self.assertEqual([[-1, -1, -1], [7, -1, -1], [7, 8, 9]], end_pos)

    def test_save_load_pretrained(self):
        retriever = self.get_dummy_retriever()
        retriever.save_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))

        # Test local path
        retriever = retriever.from_pretrained(os.path.join(self.tmpdirname, "realm_block_records"))
        self.assertEqual(retriever.block_records[0], b"This is the first record")

        # Test mocked remote path
        with patch("transformers.models.realm.retrieval_realm.hf_hub_download") as mock_hf_hub_download:
            mock_hf_hub_download.return_value = os.path.join(
                os.path.join(self.tmpdirname, "realm_block_records"), _REALM_BLOCK_RECORDS_FILENAME
            )
            retriever = RealmRetriever.from_pretrained("google/realm-cc-news-pretrained-openqa")
            self.assertEqual(retriever.block_records[0], b"This is the first record")
| 322 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.auto import AutoModelForSequenceClassification, AutoTokenizer
from .base import PipelineTool
class TextClassificationTool(PipelineTool):
    default_checkpoint = "facebook/bart-large-mnli"
    description = (
        "This is a tool that classifies an English text using provided labels. It takes two inputs: `text`, which "
        "should be the text to classify, and `labels`, which should be the list of labels to use for classification. "
        "It returns the most likely label in the list of provided `labels` for the input text."
    )
    name = "text_classifier"
    pre_processor_class = AutoTokenizer
    model_class = AutoModelForSequenceClassification

    inputs = ["text", ["text"]]
    outputs = ["text"]

    def setup(self):
        super().setup()
        config = self.model.config
        self.entailment_id = -1
        for idx, label in config.id2label.items():
            if label.lower().startswith("entail"):
                self.entailment_id = int(idx)
        if self.entailment_id == -1:
            raise ValueError("Could not determine the entailment ID from the model config, please pass it at init.")

    def encode(self, text, labels):
        self._labels = labels
        return self.pre_processor(
            [text] * len(labels),
            [f"This example is {label}" for label in labels],
            return_tensors="pt",
            padding="max_length",
        )

    def decode(self, outputs):
        logits = outputs.logits
        label_id = torch.argmax(logits[:, 2]).item()
        return self._labels[label_id]
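# Minimal usage sketch (added for illustration, not from the original file):
# PipelineTool subclasses are meant to be called like functions once
# instantiated, so usage along these lines is expected:
#
#     classifier = TextClassificationTool()
#     label = classifier("This movie was great!", labels=["positive", "negative"])
#
# The checkpoint is only downloaded when the tool is first set up.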
| 322 | 1 |
'''simple docstring'''
def solution(limit: int = 1000) -> int:
    """
    Return the sum of all natural numbers below `limit` that are multiples of 3 or 5.

    >>> solution()
    233168
    >>> solution(10)
    23
    """
    return sum(e for e in range(3, limit) if e % 3 == 0 or e % 5 == 0)
if __name__ == "__main__":
print(F"""{solution() = }""")
| 28 |
"""simple docstring"""
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
    HfArgumentParser,
    Trainer,
    TrainingArguments,
    Wav2Vec2Config,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForPreTraining,
    is_apex_available,
    trainer_utils,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import _compute_mask_indices
if is_apex_available():
from apex import amp
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to pretrain.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments):
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: str = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class lowerCamelCase__ :
'''simple docstring'''
_lowerCamelCase = 42
_lowerCamelCase = 42
_lowerCamelCase = "longest"
_lowerCamelCase = None
_lowerCamelCase = None
    def __call__(self, features) -> Dict[str, torch.Tensor]:
        # reformat list to dict and set to pytorch format
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # make sure that no loss is computed on padded inputs
        if batch["attention_mask"] is not None:
            # compute real output lengths according to convolution formula
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # these two operations make sure that all values
            # before the output lengths indices are attended to
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=attention_mask.device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # sample randomly masked indices
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
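# Illustrative sketch (not part of the original collator): the
# flip -> cumsum -> flip trick above marks every position up to each
# example's last valid output index. Assuming output lengths [2, 4] and a
# padded length of 4:
#
#   import torch
#   m = torch.zeros((2, 4), dtype=torch.long)
#   m[(torch.arange(2), torch.tensor([2, 4]) - 1)] = 1  # mark the last valid index
#   m = m.flip([-1]).cumsum(-1).flip([-1]).bool()
#   # -> tensor([[ True,  True, False, False],
#   #            [ True,  True,  True,  True]])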
class WavaVecaPreTrainer(Trainer):
    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay
    def training_step(self, model, inputs) -> torch.Tensor:
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # make sure gumbel softmax temperature is decayed
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )

        return loss.detach()
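# Sketch of the schedule implemented in training_step above: the gumbel
# temperature decays exponentially and is clamped from below,
#
#   temp(step) = max(max_gumbel_temp * gumbel_temp_decay ** step, min_gumbel_temp)
#
# With the defaults declared above (max 2.0, min 0.5, decay 0.999995), the
# temperature halves roughly every ln(2) / -ln(0.999995) ~= 138,600 updates.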
def main():
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # make sure only "validation" and "train" keys remain
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, split="validation", cache_dir=model_args.cache_dir
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # only normalized-inputs-training is supported
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # load audio files into numpy arrays
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # filter audio files that are too long
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # normalize and transform to `BatchFeatures`
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # pretraining is only supported for "newer" stable layer norm architecture
    # apply_spec_augment has to be True, mask_feature_prob has to be 0.0
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
if __name__ == "__main__":
main()
| 617 | 0 |
from __future__ import annotations
from typing import Dict
from ...configuration_utils import PretrainedConfig
ERNIE_M_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""susnato/ernie-m-base_pytorch""": """https://huggingface.co/susnato/ernie-m-base_pytorch/blob/main/config.json""",
"""susnato/ernie-m-large_pytorch""": """https://huggingface.co/susnato/ernie-m-large_pytorch/blob/main/config.json""",
}
class ErnieMConfig(PretrainedConfig):
    model_type = "ernie_m"
    attribute_map: Dict[str, str] = {"dropout": "classifier_dropout", "num_classes": "num_labels"}

    def __init__(
        self,
        vocab_size: int = 250_002,
        hidden_size: int = 768,
        num_hidden_layers: int = 12,
        num_attention_heads: int = 12,
        intermediate_size: int = 3072,
        hidden_act: str = "gelu",
        hidden_dropout_prob: float = 0.1,
        attention_probs_dropout_prob: float = 0.1,
        max_position_embeddings: int = 514,
        initializer_range: float = 0.02,
        pad_token_id: int = 1,
        layer_norm_eps: float = 1e-05,
        classifier_dropout=None,
        is_decoder=False,
        act_dropout=0.0,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.classifier_dropout = classifier_dropout
        self.is_decoder = is_decoder
        self.act_dropout = act_dropout
| 486 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50_000
SMALL_TEST = 5_000

RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset, length):
    for i in range(length):
        _ = dataset[i]


@get_duration
def read_batch(dataset, length, batch_size):
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]


@get_duration
def read_formatted(dataset, length, type):
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]


@get_duration
def read_formatted_batch(dataset, length, batch_size, type):
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1_000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1_000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)

        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling)")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )

    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 486 | 1 |
"""simple docstring"""
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextModel,
CLIPTokenizer,
WhisperForConditionalGeneration,
WhisperProcessor,
)
from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DiffusionPipeline,
    LMSDiscreteScheduler,
    PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.utils import logging
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
class SpeechToImagePipeline(DiffusionPipeline):
    def __init__(
        self,
        speech_model: WhisperForConditionalGeneration,
        speech_processor: WhisperProcessor,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
    ):
        super().__init__()

        if safety_checker is None:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        self.register_modules(
            speech_model=speech_model,
            speech_processor=speech_processor,
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        self.enable_attention_slicing(None)
@torch.no_grad()
    def __call__(
        self,
        audio,
        sampling_rate=16_000,
        height: int = 512,
        width: int = 512,
        num_inference_steps: int = 50,
        guidance_scale: float = 7.5,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ):
        inputs = self.speech_processor.feature_extractor(
            audio, return_tensors="pt", sampling_rate=sampling_rate
        ).input_features.to(self.device)
        predicted_ids = self.speech_model.generate(inputs, max_length=480_000)

        prompt = self.speech_processor.tokenizer.batch_decode(
            predicted_ids, skip_special_tokens=True, normalize=True
        )[0]

        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        text_embeddings = self.text_encoder(text_input_ids.to(self.device))[0]

        # duplicate text embeddings for each generation per prompt, using mps friendly method
        bs_embed, seq_len, _ = text_embeddings.shape
        text_embeddings = text_embeddings.repeat(1, num_images_per_prompt, 1)
        text_embeddings = text_embeddings.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            max_length = text_input_ids.shape[-1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = uncond_embeddings.shape[1]
            uncond_embeddings = uncond_embeddings.repeat(1, num_images_per_prompt, 1)
            uncond_embeddings = uncond_embeddings.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # get the initial random noise unless the user supplied it
        # Unlike in other pipelines, latents need to be generated in the target device
        # for 1-to-1 results reproducibility with the CompVis implementation.
        # However this currently doesn't work in `mps`.
        latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
        latents_dtype = text_embeddings.dtype
        if latents is None:
            if self.device.type == "mps":
                # randn does not exist on mps
                latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
                    self.device
                )
            else:
                latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps)

        # Some schedulers like PNDM have timesteps as arrays
        # It's more optimized to move all timesteps to correct device beforehand
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
            latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, latents)

        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return image

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
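# Hedged usage sketch (inputs are illustrative, not from the original file):
# once the pipeline is instantiated with the components listed in __init__,
# generation from a raw audio array looks like
#
#   output = pipe(audio, sampling_rate=16_000, num_inference_steps=50)
#   image = output.images[0]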
| 650 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/biogpt": "https://huggingface.co/microsoft/biogpt/resolve/main/config.json",
    # See all BioGPT models at https://huggingface.co/models?filter=biogpt
}


class BioGptConfig(PretrainedConfig):
    model_type = "biogpt"

    def __init__(
        self,
        vocab_size=42_384,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1024,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        scale_embedding=True,
        use_cache=True,
        layerdrop=0.0,
        activation_dropout=0.0,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.scale_embedding = scale_embedding
        self.use_cache = use_cache
        self.layerdrop = layerdrop
        self.activation_dropout = activation_dropout
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
| 650 | 1 |
import argparse
import gdown
import numpy as np
import torch
from huggingface_hub import hf_hub_download
from transformers import (
CLIPTokenizer,
CLIPTokenizerFast,
VideoMAEImageProcessor,
XCLIPConfig,
XCLIPModel,
XCLIPProcessor,
XCLIPTextConfig,
XCLIPVisionConfig,
)
def get_xclip_config(model_name, num_frames):
    text_config = XCLIPTextConfig()

    # derive patch size from model name
    start_idx = model_name.find("patch")
    patch_size = int(model_name[start_idx + len("patch") : start_idx + len("patch") + 2])
    vision_config = XCLIPVisionConfig(patch_size=patch_size, num_frames=num_frames)

    if "large" in model_name:
        text_config.hidden_size = 768
        text_config.intermediate_size = 3072
        text_config.num_attention_heads = 12
        vision_config.hidden_size = 1024
        vision_config.intermediate_size = 4096
        vision_config.num_attention_heads = 16
        vision_config.num_hidden_layers = 24
        vision_config.mit_hidden_size = 768
        vision_config.mit_intermediate_size = 3072

    if model_name == "xclip-large-patch14-16-frames":
        vision_config.image_size = 336

    config = XCLIPConfig.from_text_vision_configs(text_config, vision_config)

    if "large" in model_name:
        config.projection_dim = 768

    return config
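# Worked example of the patch-size parsing above: for
# model_name = "xclip-base-patch32", `start_idx` points at the substring
# "patch", and the two characters that follow it parse to patch_size = 32.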
def rename_key(name):
    # text encoder
    if name == "token_embedding.weight":
        name = name.replace("token_embedding.weight", "text_model.embeddings.token_embedding.weight")
    if name == "positional_embedding":
        name = name.replace("positional_embedding", "text_model.embeddings.position_embedding.weight")
    if "ln_1" in name:
        name = name.replace("ln_1", "layer_norm1")
    if "ln_2" in name:
        name = name.replace("ln_2", "layer_norm2")
    if "c_fc" in name:
        name = name.replace("c_fc", "fc1")
    if "c_proj" in name:
        name = name.replace("c_proj", "fc2")
    if name.startswith("transformer.resblocks"):
        name = name.replace("transformer.resblocks", "text_model.encoder.layers")
    if "attn.out_proj" in name and "message" not in name:
        name = name.replace("attn.out_proj", "self_attn.out_proj")
    if "ln_final" in name:
        name = name.replace("ln_final", "text_model.final_layer_norm")
    # visual encoder
    if name == "visual.class_embedding":
        name = name.replace("visual.class_embedding", "vision_model.embeddings.class_embedding")
    if name == "visual.positional_embedding":
        name = name.replace("visual.positional_embedding", "vision_model.embeddings.position_embedding.weight")
    if name.startswith("visual.transformer.resblocks"):
        name = name.replace("visual.transformer.resblocks", "vision_model.encoder.layers")
    if "visual.conv1" in name:
        name = name.replace("visual.conv1", "vision_model.embeddings.patch_embedding")
    if "visual.ln_pre" in name:
        name = name.replace("visual.ln_pre", "vision_model.pre_layernorm")
    if "visual.ln_post" in name:
        name = name.replace("visual.ln_post", "vision_model.post_layernorm")
    if "visual.proj" in name:
        name = name.replace("visual.proj", "visual_projection.weight")
    if "text_projection" in name:
        name = name.replace("text_projection", "text_projection.weight")
    # things on top
    if "prompts_visual_proj" in name:
        name = name.replace("prompts_visual_proj", "prompts_visual_projection")
    if "prompts_visual_ln" in name:
        name = name.replace("prompts_visual_ln", "prompts_visual_layernorm")
    # mit
    if name == "mit.positional_embedding":
        name = name.replace("positional", "position")
    if name.startswith("mit.resblocks"):
        name = name.replace("mit.resblocks", "mit.encoder.layers")
    # prompts generator
    if name.startswith("prompts_generator.norm"):
        name = name.replace("prompts_generator.norm", "prompts_generator.layernorm")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "attn.in_proj" in key:
            key_split = key.split(".")
            if key.startswith("visual"):
                layer_num = key_split[3]
                dim = config.vision_config.hidden_size
if "message_attn" in key:
if "weight" in key:
UpperCamelCase__ = val[
:dim, :
]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[
-dim:, :
]
else:
UpperCamelCase__ = val[
:dim
]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[
-dim:
]
else:
if "weight" in key:
UpperCamelCase__ = val[
:dim, :
]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[
-dim:, :
]
else:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[-dim:]
            elif key.startswith("mit"):
                layer_num = key_split[2]
                dim = config.vision_config.mit_hidden_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[dim : dim * 2, :]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[dim : dim * 2]
UpperCamelCase__ = val[-dim:]
            else:
                layer_num = key_split[2]
                dim = config.text_config.hidden_size
if "weight" in key:
UpperCamelCase__ = val[:dim, :]
UpperCamelCase__ = val[
dim : dim * 2, :
]
UpperCamelCase__ = val[-dim:, :]
else:
UpperCamelCase__ = val[:dim]
UpperCamelCase__ = val[
dim : dim * 2
]
UpperCamelCase__ = val[-dim:]
        else:
            new_key_name = rename_key(key)
            if new_key_name in ["visual_projection.weight", "text_projection.weight"]:
                val = val.T
            orig_state_dict[new_key_name] = val

    return orig_state_dict
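# Illustrative helper (an assumption for clarity; the conversion loop above
# inlines this slicing): CLIP-style checkpoints store query/key/value as one
# fused `attn.in_proj` tensor of shape (3 * dim, dim), and splitting it into
# the three projections amounts to
def _split_fused_qkv(val, dim):
    # rows [0:dim] -> query, [dim:2*dim] -> key, [2*dim:3*dim] -> value
    return val[:dim], val[dim : dim * 2], val[-dim:]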
def prepare_video(num_frames):
    if num_frames == 8:
        filename = "eating_spaghetti_8_frames.npy"
    elif num_frames == 16:
        filename = "eating_spaghetti.npy"
    elif num_frames == 32:
        filename = "eating_spaghetti_32_frames.npy"
    file = hf_hub_download(
        repo_id="hf-internal-testing/spaghetti-video", filename=filename, repo_type="dataset",
    )
    video = np.load(file)
    return list(video)
def convert_xclip_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    model_to_url = {
# fully supervised kinetics-400 checkpoints
'''xclip-base-patch32''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_8.pth''',
'''xclip-base-patch32-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_32_16.pth'''
),
'''xclip-base-patch16''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_8.pth''',
'''xclip-base-patch16-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k400_16_16.pth'''
),
'''xclip-large-patch14''': '''https://drive.google.com/u/0/uc?id=1NUOImq0o5DlQTST17iIP3vG7DgmHQuCx&export=download&confirm=t&uuid=b26caedc-88e2-473e-830a-9d158b653cdb''',
'''xclip-large-patch14-16-frames''': '''https://drive.google.com/u/0/uc?id=1FOYgnJc097OJ4lGwtRCCydQyVPJEOH7d&export=download&confirm=t&uuid=538fa810-e671-4050-b385-9a623f89804f''',
# fully supervised kinetics-600 checkpoints
'''xclip-base-patch16-kinetics-600''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_8.pth'''
),
'''xclip-base-patch16-kinetics-600-16-frames''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/k600_16_16.pth'''
),
'''xclip-large-patch14-kinetics-600''': '''https://drive.google.com/u/0/uc?id=1FV8C1INuM91sLAN4ImjzePLIlpMSihwV&export=download&confirm=t&uuid=141d4977-4a65-44ae-864f-4b0c19f838be''',
# few shot
'''xclip-base-patch16-hmdb-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_2.pth'''
),
'''xclip-base-patch16-hmdb-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_4.pth'''
),
'''xclip-base-patch16-hmdb-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_8.pth'''
),
'''xclip-base-patch16-hmdb-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_hmdb_16.pth'''
),
'''xclip-base-patch16-ucf-2-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_2.pth'''
),
'''xclip-base-patch16-ucf-4-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_4.pth'''
),
'''xclip-base-patch16-ucf-8-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_8.pth'''
),
'''xclip-base-patch16-ucf-16-shot''': (
'''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/few_ucf_16.pth'''
),
# zero shot
'''xclip-base-patch16-zero-shot''': '''https://github.com/nbl97/X-CLIP_Model_Zoo/releases/download/v1.0/zero.pth''',
}
    checkpoint_url = model_to_url[model_name]
    num_frames = 8
    if "16-frames" in model_name:
        num_frames = 16
    elif "shot" in model_name:
        num_frames = 32

    config = get_xclip_config(model_name, num_frames)
    model = XCLIPModel(config)
    model.eval()

    if "drive" in checkpoint_url:
        output = "pytorch_model.bin"
        gdown.cached_download(checkpoint_url, output, quiet=False)
        state_dict = torch.load(output, map_location="cpu")["model"]
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_url)["model"]

    state_dict = convert_state_dict(state_dict, config)

    model = XCLIPModel(config)
    missing_keys, unexpected_keys = model.load_state_dict(state_dict, strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids", "vision_model.embeddings.position_ids"]
    model.eval()

    image_size = 336 if model_name == "xclip-large-patch14-16-frames" else 224
    image_processor = VideoMAEImageProcessor(size=image_size)
    slow_tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")
    fast_tokenizer = CLIPTokenizerFast.from_pretrained("openai/clip-vit-base-patch32")
    processor = XCLIPProcessor(image_processor=image_processor, tokenizer=fast_tokenizer)

    video = prepare_video(num_frames)
    inputs = processor(
        text=["playing sports", "eating spaghetti", "go shopping"], videos=video, return_tensors="pt", padding=True
    )

    print("Shape of pixel values:", inputs.pixel_values.shape)

    with torch.no_grad():
        outputs = model(**inputs)

    # Verify outputs
    logits_per_video = outputs.logits_per_video
    probs = logits_per_video.softmax(dim=1)
    print("Probs:", probs)
# kinetics-400
if model_name == "xclip-base-patch32":
UpperCamelCase__ = torch.tensor([[0.00_19, 0.99_51, 0.00_30]] )
elif model_name == "xclip-base-patch32-16-frames":
UpperCamelCase__ = torch.tensor([[7.09_99e-04, 9.98_83e-01, 4.55_80e-04]] )
elif model_name == "xclip-base-patch16":
UpperCamelCase__ = torch.tensor([[0.00_83, 0.96_81, 0.02_36]] )
elif model_name == "xclip-base-patch16-16-frames":
UpperCamelCase__ = torch.tensor([[7.69_37e-04, 9.97_28e-01, 1.94_73e-03]] )
elif model_name == "xclip-large-patch14":
UpperCamelCase__ = torch.tensor([[0.00_62, 0.98_64, 0.00_75]] )
elif model_name == "xclip-large-patch14-16-frames":
UpperCamelCase__ = torch.tensor([[3.38_77e-04, 9.99_37e-01, 2.88_88e-04]] )
# kinetics-600
elif model_name == "xclip-base-patch16-kinetics-600":
UpperCamelCase__ = torch.tensor([[0.05_55, 0.89_14, 0.05_31]] )
elif model_name == "xclip-base-patch16-kinetics-600-16-frames":
UpperCamelCase__ = torch.tensor([[3.85_54e-04, 9.99_29e-01, 3.27_54e-04]] )
elif model_name == "xclip-large-patch14-kinetics-600":
UpperCamelCase__ = torch.tensor([[0.00_36, 0.99_20, 0.00_45]] )
# few shot
elif model_name == "xclip-base-patch16-hmdb-2-shot":
UpperCamelCase__ = torch.tensor([[7.18_90e-06, 9.99_94e-01, 5.65_59e-05]] )
elif model_name == "xclip-base-patch16-hmdb-4-shot":
UpperCamelCase__ = torch.tensor([[1.03_20e-05, 9.99_93e-01, 6.24_35e-05]] )
elif model_name == "xclip-base-patch16-hmdb-8-shot":
UpperCamelCase__ = torch.tensor([[4.13_77e-06, 9.99_90e-01, 9.83_86e-05]] )
elif model_name == "xclip-base-patch16-hmdb-16-shot":
UpperCamelCase__ = torch.tensor([[4.13_47e-05, 9.99_62e-01, 3.34_11e-04]] )
elif model_name == "xclip-base-patch16-ucf-2-shot":
UpperCamelCase__ = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-4-shot":
UpperCamelCase__ = torch.tensor([[8.58_57e-05, 9.99_28e-01, 6.32_91e-04]] )
elif model_name == "xclip-base-patch16-ucf-8-shot":
UpperCamelCase__ = torch.tensor([[0.00_27, 0.99_04, 0.00_70]] )
elif model_name == "xclip-base-patch16-ucf-16-shot":
UpperCamelCase__ = torch.tensor([[9.82_19e-04, 9.95_93e-01, 3.08_63e-03]] )
# zero shot
elif model_name == "xclip-base-patch16-zero-shot":
UpperCamelCase__ = torch.tensor([[3.50_82e-04, 9.97_85e-01, 1.79_66e-03]] )
else:
raise ValueError(f"Model name {model_name} not supported" )
assert torch.allclose(A , A , atol=1e-3 )
print('''Looks ok!''' )
    if pytorch_dump_folder_path is not None:
        print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
        model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing model, processor and slow tokenizer files to the hub...")
        model.push_to_hub(model_name, organization="nielsr")
        processor.push_to_hub(model_name, organization="nielsr")
        slow_tokenizer.push_to_hub(model_name, organization="nielsr")
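# Example invocation (script name and output path are illustrative):
#
#   python convert_x_clip_original_pytorch_to_hf.py \
#       --model_name xclip-base-patch32 \
#       --pytorch_dump_folder_path ./xclip-base-patch32 --push_to_hub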
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''xclip-base-patch32''',
type=str,
help='''Name of the model.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''', action='''store_true''', help='''Whether or not to push the converted model to the 🤗 hub.'''
)
    args = parser.parse_args()
convert_xclip_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 469 |
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class BaseCompressedFileFileSystem(AbstractArchiveFileSystem):
    root_marker = ""
    protocol: str = (
        None  # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
    )
    compression: str = None  # compression type in fsspec. ex: "gzip"
    extension: str = None  # extension of the filename to strip. ex: ".gz" to get file.txt from file.txt.gz
    def __init__(self, fo: str = "", target_protocol: Optional[str] = None, target_options: Optional[dict] = None, **kwargs):
        super().__init__(self, **kwargs)
        # always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
        self.file = fsspec.open(
            fo,
            mode="rb",
            protocol=target_protocol,
            compression=self.compression,
            client_kwargs={
                "requote_redirect_url": False,  # see https://github.com/huggingface/datasets/pull/5459
                "trust_env": True,  # Enable reading proxy env variables.
                **(target_options or {}).pop("client_kwargs", {}),  # To avoid issues if it was already passed.
            },
            **(target_options or {}),
        )
        self.compressed_name = os.path.basename(self.file.path.split("::")[0])
        self.uncompressed_name = (
            self.compressed_name[: self.compressed_name.rindex(".")]
            if "." in self.compressed_name
            else self.compressed_name
        )
        self.dir_cache = None
    @classmethod
    def _strip_protocol(cls, path):
        # compressed file paths are always relative to the archive root
        return super()._strip_protocol(path).lstrip("/")

    def _get_dirs(self):
        if self.dir_cache is None:
            f = {**self.file.fs.info(self.file.path), "name": self.uncompressed_name}
            self.dir_cache = {f["name"]: f}

    def cat(self, path: str):
        return self.file.open().read()

    def _open(self, path: str, mode: str = "rb", block_size=None, autocommit=True, cache_options=None, **kwargs):
        path = self._strip_protocol(path)
        if mode != "rb":
            raise ValueError(f"Tried to read with mode {mode} on file {self.file.path} opened with mode 'rb'")
        return self.file.open()
class Bz2FileSystem(BaseCompressedFileFileSystem):
    protocol = "bz2"
    compression = "bz2"
    extension = ".bz2"


class GzipFileSystem(BaseCompressedFileFileSystem):
    protocol = "gzip"
    compression = "gzip"
    extension = ".gz"


class Lz4FileSystem(BaseCompressedFileFileSystem):
    protocol = "lz4"
    compression = "lz4"
    extension = ".lz4"


class XzFileSystem(BaseCompressedFileFileSystem):
    protocol = "xz"
    compression = "xz"
    extension = ".xz"


class ZstdFileSystem(BaseCompressedFileFileSystem):
    protocol = "zstd"
    compression = "zstd"
    extension = ".zst"
def __init__(self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = "rb" , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = None , SCREAMING_SNAKE_CASE_ = DEFAULT_BLOCK_SIZE , **SCREAMING_SNAKE_CASE_ , ) -> List[Any]:
'''simple docstring'''
super().__init__(
fo=SCREAMING_SNAKE_CASE_ , mode=SCREAMING_SNAKE_CASE_ , target_protocol=SCREAMING_SNAKE_CASE_ , target_options=SCREAMING_SNAKE_CASE_ , block_size=SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ , )
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
UpperCamelCase__ = self.file.__enter__
class _A :
def __init__(self , SCREAMING_SNAKE_CASE_ ) -> int:
'''simple docstring'''
UpperCamelCase__ = file_
def __enter__(self ) -> Union[str, Any]:
'''simple docstring'''
self._file.__enter__()
return self
def __exit__(self , *SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) -> List[Any]:
'''simple docstring'''
self._file.__exit__(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ )
def __iter__(self ) -> Dict:
'''simple docstring'''
return iter(self._file )
def _a (self ) -> Dict:
'''simple docstring'''
return next(self._file )
def __getattr__(self , SCREAMING_SNAKE_CASE_ ) -> str:
'''simple docstring'''
return getattr(self._file , SCREAMING_SNAKE_CASE_ )
def fixed_enter(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ):
return WrappedFile(_enter(*SCREAMING_SNAKE_CASE_ , **SCREAMING_SNAKE_CASE_ ) )
UpperCamelCase__ = fixed_enter
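# Hedged usage sketch (URL illustrative): with these filesystems registered,
# fsspec can chain protocols to stream a remote compressed file, exactly as
# the protocol comment in the base class describes, e.g.
#
#   import fsspec
#   with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", "rb") as f:
#       content = f.read()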
| 469 | 1 |
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)

arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
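# Hypothetical lookup sketch (step counts are illustrative): an --lr_scheduler
# value resolves through the mapping above to a schedule factory, e.g.
#
#   schedule_func = arg_to_scheduler["linear"]
#   scheduler = schedule_func(optimizer, num_warmup_steps=500, num_training_steps=10_000)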
class Seq2SeqTrainer(Trainer):
    def __init__(self, config=None, data_args=None, *args, **kwargs):
        super().__init__(*args, **kwargs)

        if config is None:
            assert isinstance(self.model, PreTrainedModel), (
                "If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
                f" {self.model.__class__}"
            )
            self.config = self.model.config
        else:
            self.config = config

        self.data_args = data_args
        self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config, FSMTConfig) else self.config.vocab_size

        if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
            assert self.config.pad_token_id is not None, (
                "Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
                " calculation or doing label smoothing."
            )

        if self.config.pad_token_id is None and self.config.eos_token_id is not None:
            logger.warning(
                f"The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"
                " padding.."
            )

        if self.args.label_smoothing == 0:
            self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id)
        else:
            # dynamically import label_smoothed_nll_loss
            from utils import label_smoothed_nll_loss

            self.loss_fn = label_smoothed_nll_loss
    def create_optimizer_and_scheduler(self, num_training_steps: int):
        if self.optimizer is None:
            no_decay = ["bias", "LayerNorm.weight"]
            optimizer_grouped_parameters = [
                {
                    "params": [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay)],
                    "weight_decay": self.args.weight_decay,
                },
                {
                    "params": [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay)],
                    "weight_decay": 0.0,
                },
            ]
            optimizer_cls = Adafactor if self.args.adafactor else AdamW
            if self.args.adafactor:
                optimizer_cls = Adafactor
                optimizer_kwargs = {"scale_parameter": False, "relative_step": False}
            else:
                optimizer_cls = AdamW
                optimizer_kwargs = {
                    "betas": (self.args.adam_beta1, self.args.adam_beta2),
                    "eps": self.args.adam_epsilon,
                }
            optimizer_kwargs["lr"] = self.args.learning_rate
            if self.sharded_ddp:
                self.optimizer = OSS(
                    params=optimizer_grouped_parameters,
                    optim=optimizer_cls,
                    **optimizer_kwargs,
                )
            else:
                self.optimizer = optimizer_cls(optimizer_grouped_parameters, **optimizer_kwargs)

        if self.lr_scheduler is None:
            self.lr_scheduler = self._get_lr_scheduler(num_training_steps)
        else:  # ignoring --lr_scheduler
            logger.warning("scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.")
    def _get_lr_scheduler(self, num_training_steps):
        schedule_func = arg_to_scheduler[self.args.lr_scheduler]
        if self.args.lr_scheduler == "constant":
            scheduler = schedule_func(self.optimizer)
        elif self.args.lr_scheduler == "constant_w_warmup":
            scheduler = schedule_func(self.optimizer, num_warmup_steps=self.args.warmup_steps)
        else:
            scheduler = schedule_func(
                self.optimizer, num_warmup_steps=self.args.warmup_steps, num_training_steps=num_training_steps
            )
        return scheduler
    def _get_train_sampler(self) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
    def _compute_loss(self, model, inputs, labels):
        if self.args.label_smoothing == 0:
            if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
                # force training to ignore pad token
                logits = model(**inputs, use_cache=False)[0]
                loss = self.loss_fn(logits.view(-1, logits.shape[-1]), labels.view(-1))
            else:
                # compute usual loss via models
                loss, logits = model(**inputs, labels=labels, use_cache=False)[:2]
        else:
            # compute label smoothed loss
            logits = model(**inputs, use_cache=False)[0]
            lprobs = torch.nn.functional.log_softmax(logits, dim=-1)
            loss, _ = self.loss_fn(lprobs, labels, self.args.label_smoothing, ignore_index=self.config.pad_token_id)
        return loss, logits
    def compute_loss(self, model, inputs):
        labels = inputs.pop("labels")
        loss, _ = self._compute_loss(model, inputs, labels)
        return loss
    def prediction_step(
        self,
        model: nn.Module,
        inputs: Dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[List[str]] = None,
    ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
        inputs = self._prepare_inputs(inputs)

        gen_kwargs = {
            "max_length": self.data_args.val_max_target_length
            if self.data_args is not None
            else self.config.max_length,
            "num_beams": self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
        }

        if self.args.predict_with_generate and not self.args.prediction_loss_only:
            generated_tokens = self.model.generate(
                inputs["input_ids"],
                attention_mask=inputs["attention_mask"],
                **gen_kwargs,
            )
            # in case the batch is shorter than max length, the output should be padded
            if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
                generated_tokens = self._pad_tensors_to_max_len(generated_tokens, gen_kwargs["max_length"])

        labels = inputs.pop("labels")
        with torch.no_grad():
            # compute loss on predict data
            loss, logits = self._compute_loss(model, inputs, labels)

        loss = loss.mean().detach()
        if self.args.prediction_loss_only:
            return (loss, None, None)

        logits = generated_tokens if self.args.predict_with_generate else logits

        if labels.shape[-1] < gen_kwargs["max_length"]:
            labels = self._pad_tensors_to_max_len(labels, gen_kwargs["max_length"])

        return (loss, logits, labels)
    def _pad_tensors_to_max_len(self, tensor, max_length):
        # If PAD token is not defined at least EOS token has to be defined
        pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id

        if pad_token_id is None:
            raise ValueError(
                "Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be"
                f" padded to `max_length`={max_length}"
            )

        padded_tensor = pad_token_id * torch.ones(
            (tensor.shape[0], max_length), dtype=tensor.dtype, device=tensor.device
        )
        padded_tensor[:, : tensor.shape[-1]] = tensor
        return padded_tensor
| 106 |
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
logger = logging.get_logger(__name__)


class YolosFeatureExtractor(YolosImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use YolosImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 72 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True, out_indices=None, **kwargs):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 704 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"configuration_clap": [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapAudioConfig",
"ClapConfig",
"ClapTextConfig",
],
"processing_clap": ["ClapProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"CLAP_PRETRAINED_MODEL_ARCHIVE_LIST",
"ClapModel",
"ClapPreTrainedModel",
"ClapTextModel",
"ClapTextModelWithProjection",
"ClapAudioModel",
"ClapAudioModelWithProjection",
]
SCREAMING_SNAKE_CASE : List[Any] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
SCREAMING_SNAKE_CASE : Optional[Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 138 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class ViltProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "ViltImageProcessor"
    tokenizer_class = ("BertTokenizer", "BertTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        encoding = self.tokenizer(
            text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation,
            max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose,
            return_tensors=return_tensors, **kwargs,
        )
        # add pixel_values + pixel_mask
        encoding_image_processor = self.image_processor(images, return_tensors=return_tensors)
        encoding.update(encoding_image_processor)
        return encoding

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
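# Hedged usage sketch (model id and file name are illustrative):
#
#   from PIL import Image
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(Image.open("cats.jpg"), "How many cats?", return_tensors="pt")
#   # -> input_ids/attention_mask from the tokenizer plus pixel_values/pixel_mask
#   #    from the image processor, merged into one BatchEncoding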
| 373 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "tiiuae/falcon-40b": "https://huggingface.co/tiiuae/falcon-40b/resolve/main/config.json",
    "tiiuae/falcon-7b": "https://huggingface.co/tiiuae/falcon-7b/resolve/main/config.json",
}
class FalconConfig(PretrainedConfig):
    model_type = "falcon"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=65_024,
        hidden_size=4544,
        num_hidden_layers=32,
        num_attention_heads=71,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        hidden_dropout=0.0,
        attention_dropout=0.0,
        num_kv_heads=None,
        alibi=False,
        new_decoder_architecture=False,
        multi_query=True,
        parallel_attn=True,
        bias=False,
        bos_token_id=11,
        eos_token_id=11,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        # Backward compatibility with n_embed kwarg
        n_embed = kwargs.pop("n_embed", None)
        self.hidden_size = hidden_size if n_embed is None else n_embed
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id
        self.num_kv_heads = num_attention_heads if num_kv_heads is None else num_kv_heads
        self.alibi = alibi
        self.new_decoder_architecture = new_decoder_architecture
        self.multi_query = multi_query  # Ignored when new_decoder_architecture is True
        self.parallel_attn = parallel_attn
        self.bias = bias
        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @property
    def head_dim(self):
        return self.hidden_size // self.num_attention_heads

    @property
    def rotary(self):
        return not self.alibi
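    # Worked example from the defaults above (Falcon-7B style values):
    # head_dim = 4544 // 71 = 64, and `rotary` is True because `alibi`
    # defaults to False.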
| 373 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
"processing_speech_to_text": ["Speech2TextProcessor"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["Speech2TextTokenizer"]
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ = ["Speech2TextFeatureExtractor"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
"TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFSpeech2TextForConditionalGeneration",
"TFSpeech2TextModel",
"TFSpeech2TextPreTrainedModel",
]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_speech_to_text"] = [
"SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Speech2TextForConditionalGeneration",
"Speech2TextModel",
"Speech2TextPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, SpeechaTextConfig
from .processing_speech_to_text import SpeechaTextProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speech_to_text import SpeechaTextTokenizer
try:
if not is_speech_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_speech_to_text import SpeechaTextFeatureExtractor
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_speech_to_text import (
TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSpeechaTextForConditionalGeneration,
TFSpeechaTextModel,
TFSpeechaTextPreTrainedModel,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_to_text import (
SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechaTextForConditionalGeneration,
SpeechaTextModel,
SpeechaTextPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
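# --- illustration (added, not part of the original file) ---------------------
# Example of the downcasting above: int64 has no valid image dtype at itemsize
# 8, so kind "i" is retried at smaller itemsizes until "<i4" (int32) matches:
#   encode_np_array(np.arange(16, dtype=np.int64).reshape(4, 4))
#   -> warns "Downcasting array dtype int64 to int32 ..." and encodes the image.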
def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
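# --- usage sketch (added, not part of the original file) ---------------------
# Minimal round-trip through the Image feature above; assumes PIL and numpy
# are installed. The calls document the encode/decode contract only:
#   feature = Image()
#   encoded = feature.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
#   # encoded == {"path": None, "bytes": b"...PNG bytes..."}
#   decoded = feature.decode_example(encoded)
#   # decoded.size == (4, 4), a PIL.Image.Image instance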
| 594 | 0 |
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
    AutoTokenizer,
    Blip2Config,
    Blip2ForConditionalGeneration,
    Blip2Processor,
    Blip2VisionConfig,
    BlipImageProcessor,
    OPTConfig,
    T5Config,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def load_demo_image():
    url = "https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"
    image = Image.open(requests.get(url, stream=True).raw).convert("RGB")
    return image
def create_rename_keys(config):
    rename_keys = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.weight', F'vision_model.encoder.layers.{i}.layer_norm1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm1.bias', F'vision_model.encoder.layers.{i}.layer_norm1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.weight', F'vision_model.encoder.layers.{i}.layer_norm2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.norm2.bias', F'vision_model.encoder.layers.{i}.layer_norm2.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.qkv.weight', F'vision_model.encoder.layers.{i}.self_attn.qkv.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.weight', F'vision_model.encoder.layers.{i}.self_attn.projection.weight',) )
rename_keys.append((F'visual_encoder.blocks.{i}.attn.proj.bias', F'vision_model.encoder.layers.{i}.self_attn.projection.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.weight', F'vision_model.encoder.layers.{i}.mlp.fc1.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc1.bias', F'vision_model.encoder.layers.{i}.mlp.fc1.bias') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.weight', F'vision_model.encoder.layers.{i}.mlp.fc2.weight') )
rename_keys.append((F'visual_encoder.blocks.{i}.mlp.fc2.bias', F'vision_model.encoder.layers.{i}.mlp.fc2.bias') )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val
def read_in_q_v_bias(state_dict, config):
    for i in range(config.vision_config.num_hidden_layers):
        # read in original q and v biases
        q_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.q_bias")
        v_bias = state_dict.pop(f"visual_encoder.blocks.{i}.attn.v_bias")

        # next, set bias in the state dict
        qkv_bias = torch.cat((q_bias, torch.zeros_like(q_bias, requires_grad=False), v_bias))
        state_dict[f"visual_encoder.blocks.{i}.attn.qkv.bias"] = qkv_bias
def get_blip2_config(model_name, eos_token_id):
    image_size = 364 if "coco" in model_name else 224
    vision_config = Blip2VisionConfig(image_size=image_size).to_dict()

    # make sure the models have proper bos_token_id and eos_token_id set (important for generation)
    # seems like flan-T5 models don't have bos_token_id properly set?
    if "opt-2.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-2.7b", eos_token_id=eos_token_id).to_dict()
    elif "opt-6.7b" in model_name:
        text_config = OPTConfig.from_pretrained("facebook/opt-6.7b", eos_token_id=eos_token_id).to_dict()
    elif "t5-xl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xl", dense_act_fn="gelu", bos_token_id=1).to_dict()
    elif "t5-xxl" in model_name:
        text_config = T5Config.from_pretrained("google/flan-t5-xxl", dense_act_fn="gelu", bos_token_id=1).to_dict()

    config = Blip2Config(vision_config=vision_config, text_config=text_config)
    return config, image_size
@torch.no_grad()
def convert_blip2_checkpoint(model_name, pytorch_dump_folder_path=None, push_to_hub=False):
    """
    Copy/paste/tweak the original model's weights to the Transformers design.
    """
    tokenizer = (
        AutoTokenizer.from_pretrained("facebook/opt-2.7b")
        if "opt" in model_name
        else AutoTokenizer.from_pretrained("google/flan-t5-xl")
    )
    eos_token_id = tokenizer("\n", add_special_tokens=False).input_ids[0]
    config, image_size = get_blip2_config(model_name, eos_token_id=eos_token_id)

    hf_model = Blip2ForConditionalGeneration(config).eval()

    model_name_to_original = {
        "blip2-opt-2.7b": ("blip2_opt", "pretrain_opt2.7b"),
        "blip2-opt-6.7b": ("blip2_opt", "pretrain_opt6.7b"),
        "blip2-opt-2.7b-coco": ("blip2_opt", "caption_coco_opt2.7b"),
        "blip2-opt-6.7b-coco": ("blip2_opt", "caption_coco_opt6.7b"),
        "blip2-flan-t5-xl": ("blip2_t5", "pretrain_flant5xl"),
        "blip2-flan-t5-xl-coco": ("blip2_t5", "caption_coco_flant5xl"),
        "blip2-flan-t5-xxl": ("blip2_t5", "pretrain_flant5xxl"),
    }

    name, type = model_name_to_original[model_name]

    # load original model
    print("Loading original model...")
    device = "cuda" if torch.cuda.is_available() else "cpu"
    original_model, vis_processors, _ = load_model_and_preprocess(
        name=name, model_type=type, is_eval=True, device=device)
    original_model.eval()
    print("Done!")

    # update state dict keys
    state_dict = original_model.state_dict()
    rename_keys = create_rename_keys(config)
    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)

    # some keys can be renamed efficiently
    for key, val in state_dict.copy().items():
        val = state_dict.pop(key)
        if key.startswith("Qformer.bert"):
            key = key.replace("Qformer.bert", "qformer")
        if "attention.self" in key:
            key = key.replace("self", "attention")
        if "opt_proj" in key:
            key = key.replace("opt_proj", "language_projection")
        if "t5_proj" in key:
            key = key.replace("t5_proj", "language_projection")
        if key.startswith("opt"):
            key = key.replace("opt", "language")
        if key.startswith("t5"):
            key = key.replace("t5", "language")
        state_dict[key] = val

    # read in qv biases
    read_in_q_v_bias(state_dict, config)

    missing_keys, unexpected_keys = hf_model.load_state_dict(state_dict, strict=False)
    assert len(missing_keys) == 0
    assert unexpected_keys == ["qformer.embeddings.position_ids"]

    image = load_demo_image()
    original_pixel_values = vis_processors["eval"](image).unsqueeze(0).to(device)
    input_ids = tokenizer(["\n"], return_tensors="pt").input_ids.to(device)

    # create processor
    image_processor = BlipImageProcessor(
        size={"height": image_size, "width": image_size}, image_mean=OPENAI_CLIP_MEAN, image_std=OPENAI_CLIP_STD)
    processor = Blip2Processor(image_processor=image_processor, tokenizer=tokenizer)
    pixel_values = processor(images=image, return_tensors="pt").pixel_values.to(device)

    # make sure processor creates exact same pixel values
    assert torch.allclose(pixel_values, original_pixel_values)

    original_model.to(device)
    hf_model.to(device)
    with torch.no_grad():
        if "opt" in model_name:
            original_logits = original_model({"image": original_pixel_values, "text_input": [""]}).logits
            logits = hf_model(pixel_values, input_ids).logits
        else:
            original_logits = original_model(
                {"image": original_pixel_values, "text_input": ["\n"], "text_output": ["\n"]}).logits
            labels = input_ids.masked_fill(input_ids == tokenizer.pad_token_id, -100)
            logits = hf_model(pixel_values, input_ids, labels=labels).logits

    assert original_logits.shape == logits.shape
    print("First values of original logits:", original_logits[0, :3, :3])
    print("First values of HF logits:", logits[0, :3, :3])

    # assert values
    if model_name == "blip2-flan-t5-xl":
        expected_slice_logits = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]], device=device)
        assert torch.allclose(logits[0, :3, :3], expected_slice_logits, atol=1e-4)
    elif model_name == "blip2-flan-t5-xl-coco":
        expected_slice_logits = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]], device=device)
    else:
        # cast to same type
        target_dtype = logits.dtype
        assert torch.allclose(original_logits.to(target_dtype), logits, atol=1e-2)
    print("Looks ok!")

    print("Generating a caption...")
    prompt = ""
    input_ids = tokenizer(prompt, return_tensors="pt").input_ids.to(device)

    original_outputs = original_model.generate({"image": original_pixel_values})
    outputs = hf_model.generate(
        pixel_values,
        input_ids,
        do_sample=False,
        num_beams=5,
        max_length=30,
        min_length=1,
        top_p=0.9,
        repetition_penalty=1.0,
        length_penalty=1.0,
        temperature=1,
    )
    print("Original generation:", original_outputs)
    prompt_length = input_ids.shape[1]
    output_text = processor.batch_decode(outputs[:, prompt_length:], skip_special_tokens=True)
    output_text = [text.strip() for text in output_text]
    print("HF generation:", output_text)

    if pytorch_dump_folder_path is not None:
        processor.save_pretrained(pytorch_dump_folder_path)
        hf_model.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        processor.push_to_hub(f"nielsr/{model_name}")
        hf_model.push_to_hub(f"nielsr/{model_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
    args = parser.parse_args()

    convert_blip2_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
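    # Example invocation (added illustration; the script filename is hypothetical):
    #   python convert_blip_2_original_to_pytorch.py --model_name blip2-opt-2.7b \
    #       --pytorch_dump_folder_path ./blip2-opt-2.7b --push_to_hub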
| 612 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_upernet": ["UperNetConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_upernet"] = [
        "UperNetForSemanticSegmentation",
        "UperNetPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_upernet import UperNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_upernet import UperNetForSemanticSegmentation, UperNetPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 612 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d %H:%M:%S',
level=os.environ.get('LOGLEVEL', 'INFO').upper(),
stream=sys.stdout,
)
logger = logging.getLogger(__name__)

model_dict = {"facebook/bart-base": BartForConditionalGeneration}
tokenizer_dict = {"facebook/bart-base": BartTokenizer}
def parse_args():
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data.")
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()

    return args
def load_model_tokenizer(model_name, device="cpu"):
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer
def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")
def main():
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length
    if args.num_beams:
        num_beams = args.num_beams
    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)
if __name__ == "__main__":
main()
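# Example invocation (added illustration; the script filename is hypothetical):
#   python run_onnx_exporter.py --model_name_or_path facebook/bart-base \
#       --num_beams 4 --max_length 5 --output_file_path bart.onnx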
| 711 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
@slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25_543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert !"
        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
        self.assertTrue(np.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
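    # Note (added): this integration test is gated behind the @slow decorator;
    # in the transformers test suite such tests only run when RUN_SLOW=1 is set,
    # e.g.  RUN_SLOW=1 pytest tests/models/camembert -k integration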
| 380 | 0 |
import unittest
from accelerate import debug_launcher
from accelerate.test_utils import require_cpu, test_ops, test_script
@require_cpu
class MultiCPUTester(unittest.TestCase):
    def test_cpu(self):
        debug_launcher(test_script.main)

    def test_ops(self):
        debug_launcher(test_ops.main) | 534 | from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)

            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs,
            batched=True,
            batch_size=batch_size,
            remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch",
            columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir,
            per_device_train_batch_size=batch_size,
            per_device_eval_batch_size=batch_size,
            predict_with_generate=True,
            evaluation_strategy="steps",
            do_train=True,
            do_eval=True,
            warmup_steps=0,
            eval_steps=2,
            logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert,
            args=training_args,
            compute_metrics=_compute_metrics,
            train_dataset=train_dataset,
            eval_dataset=val_dataset,
            tokenizer=tokenizer,
        )

        # start training
        trainer.train() | 534 | 1 |
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
lowerCAmelCase_ : Tuple = "\\n@inproceedings{popovic-2015-chrf,\n title = \"chr{F}: character n-gram {F}-score for automatic {MT} evaluation\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Tenth Workshop on Statistical Machine Translation\",\n month = sep,\n year = \"2015\",\n address = \"Lisbon, Portugal\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W15-3049\",\n doi = \"10.18653/v1/W15-3049\",\n pages = \"392--395\",\n}\n@inproceedings{popovic-2017-chrf,\n title = \"chr{F}++: words helping character n-grams\",\n author = \"Popovi{\'c}, Maja\",\n booktitle = \"Proceedings of the Second Conference on Machine Translation\",\n month = sep,\n year = \"2017\",\n address = \"Copenhagen, Denmark\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/W17-4770\",\n doi = \"10.18653/v1/W17-4770\",\n pages = \"612--618\",\n}\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
lowerCAmelCase_ : List[Any] = "\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu's required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n"
lowerCAmelCase_ : List[str] = "\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n 'score' (float): The chrF (chrF++) score,\n 'char_order' (int): The character n-gram order,\n 'word_order' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n 'beta' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {'score': 84.64214891738334, 'char_order': 6, 'word_order': 0, 'beta': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {'score': 82.87263732906315, 'char_order': 6, 'word_order': 2, 'beta': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = [\"The relationship between cats and dogs is not exactly friendly.\", \"a good bookshop is just a genteel black hole that knows how to read.\"]\n >>> reference = [[\"The relationship between dogs and cats is not exactly friendly.\"], [\"A good bookshop is just a genteel Black Hole that knows how to read.\"]]\n >>> chrf = datasets.load_metric(\"chrf\")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {'score': 92.12853119829202, 'char_order': 6, 'word_order': 2, 'beta': 2}\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class ChrF(datasets.Metric):
    def _info(self):
if version.parse(scb.__version__) < version.parse("1.4.12"):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`.")
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence"),
"references": datasets.Sequence(datasets.Value("string" , id="sequence") , id="references"),
}) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order: int = CHRF.CHAR_ORDER,
        word_order: int = CHRF.WORD_ORDER,
        beta: int = CHRF.BETA,
        lowercase: bool = False,
        whitespace: bool = False,
        eps_smoothing: bool = False,
    ):
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]

        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
return {
"score": output.score,
"char_order": output.char_order,
"word_order": output.word_order,
"beta": output.beta,
}
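# --- usage sketch (added, not part of the original file) ---------------------
# Mirrors the docstring examples above; requires `datasets` and `sacrebleu`.
if __name__ == "__main__":
    chrf = datasets.load_metric("chrf")
    predictions = ["The relationship between cats and dogs is not exactly friendly."]
    references = [["The relationship between dogs and cats is not exactly friendly."]]
    print(chrf.compute(predictions=predictions, references=references, word_order=2))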
| 711 | '''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS: Optional[List[str]] = None
_NATIVE_BYTEORDER = "<" if sys.byteorder == "little" else ">"
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype("|b1"),
np.dtype("|u1"),
np.dtype("<u2"),
np.dtype(">u2"),
np.dtype("<i2"),
np.dtype(">i2"),
np.dtype("<u4"),
np.dtype(">u4"),
np.dtype("<i4"),
np.dtype(">i4"),
np.dtype("<f4"),
np.dtype(">f4"),
np.dtype("<f8"),
np.dtype(">f8"),
]
@dataclass
class Image:
    """Image feature to read image data from an image file."""

    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({"bytes": pa.binary(), "path": pa.string()})
    _type: str = field(default="Image", init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support encoding images, please install 'Pillow'.")

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get("path") is not None and os.path.isfile(value["path"]):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get("path")}
        elif value.get("bytes") is not None or value.get("path") is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get("bytes"), "path": value.get("path")}
        else:
            raise ValueError(
                f"An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.")

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError("Decoding is disabled for this feature. Please use Image(decode=True) instead.")
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError("To support decoding images, please install 'Pillow'.")

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value["path"], value["bytes"]
        if bytes_ is None:
            if path is None:
                raise ValueError(f"An image should have one of 'path' or 'bytes' but both are None in {value}.")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split("::")[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)["repo_id"]
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, "rb", use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value("binary"),
                "path": Value("string"),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index("bytes") >= 0:
                bytes_array = storage.field("bytes")
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index("path") >= 0:
                path_array = storage.field("path")
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))["bytes"] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, "rb") as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x["path"]) if x["bytes"] is None else x["bytes"]) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field("path").to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ["bytes", "path"], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise PNG/TIFF."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = "PNG" if image.mode in ["1", "L", "LA", "RGB", "RGBA"] else "TIFF"
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, "filename") and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != "=" else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype("|u1")
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.")
        if dtype is not dest_dtype:
            warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dest_dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dest_dtype_str)
                warnings.warn(f"Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'")
                break
            else:
                dtype_itemsize //= 2
    if dest_dtype is None:
        raise TypeError(
            f"Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]],
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError("To support encoding images, please install 'Pillow'.")

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
| 461 | 0 |
'''simple docstring'''
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
from ...utils.constants import SAGEMAKER_PARALLEL_EC2_INSTANCES, TORCH_DYNAMO_MODES
from ...utils.dataclasses import ComputeEnvironment, SageMakerDistributedType
from ...utils.imports import is_boto3_available
from .config_args import SageMakerConfig
from .config_utils import (
DYNAMO_BACKENDS,
_ask_field,
_ask_options,
_convert_dynamo_backend,
_convert_mixed_precision,
_convert_sagemaker_distributed_mode,
_convert_yes_no_to_bool,
)
if is_boto3_available():
    import boto3  # noqa: F401


def _create_iam_role_for_sagemaker(role_name: str):
    iam_client = boto3.client("iam")

    sagemaker_trust_policy = {
        "Version": "2012-10-17",
        "Statement": [
            {"Effect": "Allow", "Principal": {"Service": "sagemaker.amazonaws.com"}, "Action": "sts:AssumeRole"}
        ],
    }
    try:
        # create the role, associated with the chosen trust policy
        iam_client.create_role(
            RoleName=role_name, AssumeRolePolicyDocument=json.dumps(sagemaker_trust_policy, indent=2))
        policy_document = {
"""Version""": """2012-10-17""",
"""Statement""": [
{
"""Effect""": """Allow""",
"""Action""": [
"""sagemaker:*""",
"""ecr:GetDownloadUrlForLayer""",
"""ecr:BatchGetImage""",
"""ecr:BatchCheckLayerAvailability""",
"""ecr:GetAuthorizationToken""",
"""cloudwatch:PutMetricData""",
"""cloudwatch:GetMetricData""",
"""cloudwatch:GetMetricStatistics""",
"""cloudwatch:ListMetrics""",
"""logs:CreateLogGroup""",
"""logs:CreateLogStream""",
"""logs:DescribeLogStreams""",
"""logs:PutLogEvents""",
"""logs:GetLogEvents""",
"""s3:CreateBucket""",
"""s3:ListBucket""",
"""s3:GetBucketLocation""",
"""s3:GetObject""",
"""s3:PutObject""",
],
"""Resource""": """*""",
}
],
}
# attach policy to role
        iam_client.put_role_policy(
            RoleName=role_name, PolicyName=f"{role_name}_policy_permission", PolicyDocument=json.dumps(policy_document, indent=2),
        )
except iam_client.exceptions.EntityAlreadyExistsException:
print(f'role {role_name} already exists. Using existing one' )
def _get_iam_role_arn(role_name: str):
    iam_client = boto3.client("iam")
    return iam_client.get_role(RoleName=role_name)["Role"]["Arn"]
def get_sagemaker_input():
    credentials_configuration = _ask_options(
        "How do you want to authorize?",
        ["AWS Profile", "Credentials (AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY) "],
        int,
    )
    aws_profile = None
    if credentials_configuration == 0:
        aws_profile = _ask_field("Enter your AWS Profile name: [default] ", default="default")
        os.environ["AWS_PROFILE"] = aws_profile
    else:
        print(
            "Note you will need to provide AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY when you launch you training script with,"
            "`accelerate launch --aws_access_key_id XXX --aws_secret_access_key YYY`")
        aws_access_key_id = _ask_field("AWS Access Key ID: ")
        os.environ["AWS_ACCESS_KEY_ID"] = aws_access_key_id

        aws_secret_access_key = _ask_field("AWS Secret Access Key: ")
        os.environ["AWS_SECRET_ACCESS_KEY"] = aws_secret_access_key

    aws_region = _ask_field("Enter your AWS Region: [us-east-1]", default="us-east-1")
    os.environ["AWS_DEFAULT_REGION"] = aws_region

    role_management = _ask_options(
        "Do you already have an IAM Role for executing Amazon SageMaker Training Jobs?",
        ["Provide IAM Role name", "Create new IAM role using credentials"],
        int,
    )
    if role_management == 0:
        iam_role_name = _ask_field("Enter your IAM role name: ")
    else:
        iam_role_name = "accelerate_sagemaker_execution_role"
        print(f'Accelerate will create an iam role "{iam_role_name}" using the provided credentials')
        _create_iam_role_for_sagemaker(iam_role_name)

    is_custom_docker_image = _ask_field(
        "Do you want to use custom Docker image? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    docker_image = None
    if is_custom_docker_image:
        docker_image = _ask_field("Enter your Docker image: ", lambda x: str(x).lower())

    is_sagemaker_inputs_enabled = _ask_field(
        "Do you want to provide SageMaker input channels with data locations? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_inputs_file = None
    if is_sagemaker_inputs_enabled:
        sagemaker_inputs_file = _ask_field(
            "Enter the path to the SageMaker inputs TSV file with columns (channel_name, data_location): ",
            lambda x: str(x).lower(),
        )

    is_sagemaker_metrics_enabled = _ask_field(
        "Do you want to enable SageMaker metrics? [yes/NO]: ",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    sagemaker_metrics_file = None
    if is_sagemaker_metrics_enabled:
        sagemaker_metrics_file = _ask_field(
            "Enter the path to the SageMaker metrics TSV file with columns (metric_name, metric_regex): ",
            lambda x: str(x).lower(),
        )

    distributed_type = _ask_options(
        "What is the distributed mode?",
        ["No distributed training", "Data parallelism"],
        _convert_sagemaker_distributed_mode,
    )
    dynamo_config = {}
    use_dynamo = _ask_field(
        "Do you wish to optimize your script with torch dynamo?[yes/NO]:",
        _convert_yes_no_to_bool,
        default=False,
        error_message="Please enter yes or no.",
    )
    if use_dynamo:
        prefix = "dynamo_"
        dynamo_config[prefix + "backend"] = _ask_options(
            "Which dynamo backend would you like to use?",
            [x.lower() for x in DYNAMO_BACKENDS],
            _convert_dynamo_backend,
            default=2,
        )
        use_custom_options = _ask_field(
            "Do you want to customize the defaults sent to torch.compile? [yes/NO]: ",
            _convert_yes_no_to_bool,
            default=False,
            error_message="Please enter yes or no.",
        )

        if use_custom_options:
            dynamo_config[prefix + "mode"] = _ask_options(
                "Which mode do you want to use?",
                TORCH_DYNAMO_MODES,
                lambda x: TORCH_DYNAMO_MODES[int(x)],
                default="default",
            )
            dynamo_config[prefix + "use_fullgraph"] = _ask_field(
                "Do you want the fullgraph mode or it is ok to break model into several subgraphs? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
            dynamo_config[prefix + "use_dynamic"] = _ask_field(
                "Do you want to enable dynamic shape tracing? [yes/NO]: ",
                _convert_yes_no_to_bool,
                default=False,
                error_message="Please enter yes or no.",
            )
    ec2_instance_query = "Which EC2 instance type you want to use for your training?"
    if distributed_type != SageMakerDistributedType.NO:
        ec2_instance_type = _ask_options(
            ec2_instance_query, SAGEMAKER_PARALLEL_EC2_INSTANCES, lambda x: SAGEMAKER_PARALLEL_EC2_INSTANCES[int(x)])
    else:
        ec2_instance_query += "? [ml.p3.2xlarge]:"
        ec2_instance_type = _ask_field(ec2_instance_query, lambda x: str(x).lower(), default="ml.p3.2xlarge")

    num_machines = 1
    if distributed_type in (SageMakerDistributedType.DATA_PARALLEL, SageMakerDistributedType.MODEL_PARALLEL):
        num_machines = _ask_field(
            "How many machines do you want use? [1]: ",
            int,
            default=1,
        )

    mixed_precision = _ask_options(
        "Do you wish to use FP16 or BF16 (mixed precision)?",
        ["no", "fp16", "bf16", "fp8"],
        _convert_mixed_precision,
    )

    if use_dynamo and mixed_precision == "no":
        print(
            "Torch dynamo used without mixed precision requires TF32 to be efficient. Accelerate will enable it by default when launching your scripts.")

    return SageMakerConfig(
        image_uri=docker_image,
        compute_environment=ComputeEnvironment.AMAZON_SAGEMAKER,
        distributed_type=distributed_type,
        use_cpu=False,
        dynamo_config=dynamo_config,
        ec2_instance_type=ec2_instance_type,
        profile=aws_profile,
        region=aws_region,
        iam_role_name=iam_role_name,
        mixed_precision=mixed_precision,
        num_machines=num_machines,
        sagemaker_inputs_file=sagemaker_inputs_file,
        sagemaker_metrics_file=sagemaker_metrics_file,
    )
| 5 |
'''simple docstring'''
import unittest
from transformers import PegasusTokenizer, PegasusTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/test_sentencepiece_no_bos.model')
@require_sentencepiece
@require_tokenizers
class PegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
    @cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained("""google/pegasus-large""" )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_convert_token_and_id( self ):
        """simple docstring"""
        token = """</s>"""
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self ):
        """simple docstring"""
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """</s>""" )
        self.assertEqual(vocab_keys[-1] , """v""" )
        self.assertEqual(len(vocab_keys ) , 1103 )
    def test_vocab_size( self ):
        """simple docstring"""
        self.assertEqual(self.get_tokenizer().vocab_size , 1103 )
    def test_mask_tokens_rust_pegasus( self ):
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token_11> one <mask_1> It seems like this <mask_2> was important"""
            """ </s> <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
    def test_large_mask_tokens( self ):
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # <mask_1> masks whole sentence while <mask_2> masks single word
        raw_input_str = """<mask_1> To ensure a <mask_2> flow of bank resolutions."""
        desired_result = [2, 413, 615, 114, 3, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
    def test_large_tokenizer_settings( self ):
        """simple docstring"""
        tokenizer = self._large_tokenizer
        # The tracebacks for the following asserts are **better** without messages or self.assertEqual
        assert tokenizer.vocab_size == 96103
        assert tokenizer.pad_token_id == 0
        assert tokenizer.eos_token_id == 1
        assert tokenizer.offset == 103
        assert tokenizer.unk_token_id == tokenizer.offset + 2 == 105
        assert tokenizer.unk_token == "<unk>"
        assert tokenizer.model_max_length == 1024
        raw_input_str = """To ensure a smooth flow of bank resolutions."""
        desired_result = [413, 615, 114, 2291, 1971, 113, 1679, 10710, 107, 1]
        ids = tokenizer([raw_input_str] , return_tensors=None ).input_ids[0]
        self.assertListEqual(desired_result , ids )
assert tokenizer.convert_ids_to_tokens([0, 1, 2, 3] ) == ["<pad>", "</s>", "<mask_1>", "<mask_2>"]
@require_torch
    def test_large_seq2seq_truncation( self ):
        """simple docstring"""
        src_texts = ["""This is going to be way too long.""" * 150, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 1024)
        assert batch.attention_mask.shape == (2, 1024)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
@slow
    def test_tokenizer_integration( self ):
        """simple docstring"""
UpperCAmelCase__ = {"""input_ids""": [[3_89_79, 1_43, 1_84_85, 6_06, 1_30, 2_66_69, 8_76_86, 1_21, 5_41_89, 11_29, 1_11, 2_66_69, 8_76_86, 1_21, 91_14, 1_47_87, 1_21, 1_32_49, 1_58, 5_92, 9_56, 1_21, 1_46_21, 3_15_76, 1_43, 6_26_13, 1_08, 96_88, 9_30, 4_34_30, 1_15_62, 6_26_13, 3_04, 1_08, 1_14_43, 8_97, 1_08, 93_14, 1_74_15, 6_33_99, 1_08, 1_14_43, 76_14, 1_83_16, 1_18, 42_84, 71_48, 1_24_30, 1_43, 14_00, 2_57_03, 1_58, 1_11, 42_84, 71_48, 1_17_72, 1_43, 2_12_97, 10_64, 1_58, 1_22, 2_04, 35_06, 17_54, 11_33, 1_47_87, 15_81, 1_15, 3_32_24, 44_82, 1_11, 13_55, 1_10, 2_91_73, 3_17, 5_08_33, 1_08, 2_01_47, 9_46_65, 1_11, 7_71_98, 1_07, 1], [1_10, 6_26_13, 1_17, 6_38, 1_12, 11_33, 1_21, 2_00_98, 13_55, 7_90_50, 1_38_72, 1_35, 15_96, 5_35_41, 13_52, 1_41, 1_30_39, 55_42, 1_24, 3_02, 5_18, 1_11, 2_68, 29_56, 1_15, 1_49, 44_27, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1_39, 12_35, 27_99, 1_82_89, 1_77_80, 2_04, 1_09, 94_74, 12_96, 1_07, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=UpperCAmelCase__ , model_name="""google/bigbird-pegasus-large-arxiv""" , revision="""ba85d0851d708441f91440d509690f1ab6353415""" , )
@require_sentencepiece
@require_tokenizers
class BigBirdPegasusTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    '''simple docstring'''
    tokenizer_class = PegasusTokenizer
    rust_tokenizer_class = PegasusTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        """simple docstring"""
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = PegasusTokenizer(SAMPLE_VOCAB , offset=0 , mask_token_sent=None , mask_token="""[MASK]""" )
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
    def _large_tokenizer( self ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained("""google/bigbird-pegasus-large-arxiv""" )
    def get_tokenizer( self , **kwargs ):
        """simple docstring"""
        return PegasusTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts( self , tokenizer ):
        """simple docstring"""
        return ("This is a test", "This is a test")
    def test_mask_tokens_rust_pegasus( self ):
        """simple docstring"""
        rust_tokenizer = self.rust_tokenizer_class.from_pretrained(self.tmpdirname )
        py_tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname )
        raw_input_str = (
            """Let's see which <unk> is the better <unk_token> one [MASK] It seems like this [MASK] was important </s>"""
            """ <pad> <pad> <pad>"""
        )
        rust_ids = rust_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        py_ids = py_tokenizer([raw_input_str] , return_tensors=None , add_special_tokens=False ).input_ids[0]
        self.assertListEqual(py_ids , rust_ids )
@require_torch
    def test_large_seq2seq_truncation( self ):
        """simple docstring"""
        src_texts = ["""This is going to be way too long.""" * 1000, """short example"""]
        tgt_texts = ["""not super long but more than 5 tokens""", """tiny"""]
        batch = self._large_tokenizer(src_texts , padding=True , truncation=True , return_tensors="""pt""" )
        targets = self._large_tokenizer(
            text_target=tgt_texts , max_length=5 , padding=True , truncation=True , return_tensors="""pt""" )
        assert batch.input_ids.shape == (2, 4096)
        assert batch.attention_mask.shape == (2, 4096)
        assert targets["input_ids"].shape == (2, 5)
        assert len(batch ) == 2  # input_ids, attention_mask.
    def test_equivalence_to_orig_tokenizer( self ):
        """simple docstring"""
        raw_input_str = (
            """This is an example string that is used to test the original TF implementation against the HF"""
            """ implementation"""
        )
        ids = self._large_tokenizer(raw_input_str ).input_ids
        self.assertListEqual(
            ids , [182, 117, 142, 587, 4211, 120, 117, 263, 112, 804, 109, 856, 25016, 3137, 464, 109, 26955, 3137, 1] , )
| 603 | 0 |
"""simple docstring"""
import numpy as np
def exponential_linear_unit(vector: np.ndarray, alpha: float) -> np.ndarray:
    # ELU keeps positive inputs unchanged and maps negative inputs to alpha * (exp(x) - 1).
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
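# Minimal usage sketch (illustrative values, assuming alpha=1.0; not part of the original file):
# exponential_linear_unit(np.array([-1.0, 0.0, 2.0]), alpha=1.0)
# -> array([-0.63212056,  0.        ,  2.        ])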
if __name__ == "__main__":
import doctest
doctest.testmod()
| 42 |
"""simple docstring"""
import unittest
from knapsack import greedy_knapsack as kp
class TestGreedyKnapsack(unittest.TestCase):
    """Unit tests for the greedy knapsack implementation."""

    def test_sorted(self) -> None:
        profit = [10, 20, 30, 40, 50, 60]
        weight = [2, 4, 6, 8, 10, 12]
        max_weight = 100
        self.assertEqual(kp.calc_profit(profit, weight, max_weight), 210)

    def test_negative_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_negative_weight_value(self) -> None:
        self.assertRaisesRegex(ValueError, """Weight can not be negative.""")

    def test_negative_profit_value(self) -> None:
        self.assertRaisesRegex(ValueError, """Profit can not be negative.""")

    def test_null_max_weight(self) -> None:
        self.assertRaisesRegex(ValueError, """max_weight must greater than zero.""")

    def test_unequal_list_length(self) -> None:
        self.assertRaisesRegex(
            IndexError, """The length of profit and weight must be same.""")
if __name__ == "__main__":
unittest.main()
| 42 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version('''>=''', '''4.25.0''')):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
)
else:
from .modeling_text_unet import UNetFlatConditionModel
from .pipeline_versatile_diffusion import VersatileDiffusionPipeline
from .pipeline_versatile_diffusion_dual_guided import VersatileDiffusionDualGuidedPipeline
from .pipeline_versatile_diffusion_image_variation import VersatileDiffusionImageVariationPipeline
from .pipeline_versatile_diffusion_text_to_image import VersatileDiffusionTextToImagePipeline
| 580 |
from __future__ import annotations
import numpy as np
def relu(vector: list[float]) -> np.ndarray:
    # element-wise max(0, x)
    return np.maximum(0, vector)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 89 | 0 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__UpperCamelCase : List[Any] = logging.get_logger(__name__)
__UpperCamelCase : Union[str, Any] = '▁'
__UpperCamelCase : Tuple = {'vocab_file': 'sentencepiece.bpe.model', 'monolingual_vocab_file': 'dict.txt'}
__UpperCamelCase : Tuple = {
'vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/sentencepiece.bpe.model',
},
'monolingual_vocab_file': {
'vinai/bartpho-syllable': 'https://huggingface.co/vinai/bartpho-syllable/resolve/main/dict.txt',
},
}
__UpperCamelCase : Tuple = {'vinai/bartpho-syllable': 1024}
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = VOCAB_FILES_NAMES
UpperCamelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Dict , UpperCamelCase__ : List[Any] , UpperCamelCase__ : Dict , UpperCamelCase__ : Dict="<s>" , UpperCamelCase__ : str="</s>" , UpperCamelCase__ : Dict="</s>" , UpperCamelCase__ : Tuple="<s>" , UpperCamelCase__ : str="<unk>" , UpperCamelCase__ : Dict="<pad>" , UpperCamelCase__ : Tuple="<mask>" , UpperCamelCase__ : Optional[Dict[str, Any]] = None , **UpperCamelCase__ : Tuple , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = AddedToken(UpperCamelCase__ , lstrip=UpperCamelCase__ , rstrip=UpperCamelCase__ ) if isinstance(UpperCamelCase__ , UpperCamelCase__ ) else mask_token
SCREAMING_SNAKE_CASE : Tuple = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=UpperCamelCase__ , eos_token=UpperCamelCase__ , unk_token=UpperCamelCase__ , sep_token=UpperCamelCase__ , cls_token=UpperCamelCase__ , pad_token=UpperCamelCase__ , mask_token=UpperCamelCase__ , sp_model_kwargs=self.sp_model_kwargs , **UpperCamelCase__ , )
SCREAMING_SNAKE_CASE : Optional[int] = vocab_file
SCREAMING_SNAKE_CASE : Any = monolingual_vocab_file
SCREAMING_SNAKE_CASE : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(UpperCamelCase__ ) )
# Load the reduced vocab
# Keep order of special tokens for backward compatibility
SCREAMING_SNAKE_CASE : Dict = {}
SCREAMING_SNAKE_CASE : List[Any] = 0
for token in [bos_token, pad_token, eos_token, unk_token, sep_token, cls_token]:
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : str = cnt
cnt += 1
with open(UpperCamelCase__ , '''r''' , encoding='''utf-8''' ) as f:
for line in f.readlines():
SCREAMING_SNAKE_CASE : Union[str, Any] = line.strip().split()[0]
SCREAMING_SNAKE_CASE : int = len(self.fairseq_tokens_to_ids )
if str(UpperCamelCase__ ) not in self.fairseq_tokens_to_ids:
SCREAMING_SNAKE_CASE : Any = len(self.fairseq_tokens_to_ids )
SCREAMING_SNAKE_CASE : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Any ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = self.__dict__.copy()
SCREAMING_SNAKE_CASE : Tuple = None
SCREAMING_SNAKE_CASE : str = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Optional[Any] , UpperCamelCase__ : Dict ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[str] = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
SCREAMING_SNAKE_CASE : List[Any] = {}
SCREAMING_SNAKE_CASE : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def __A ( self : Optional[int] , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE : Dict = [self.cls_token_id]
SCREAMING_SNAKE_CASE : Any = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __A ( self : Tuple , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None , UpperCamelCase__ : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCamelCase__ , token_ids_a=UpperCamelCase__ , already_has_special_tokens=UpperCamelCase__ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCamelCase__ )) + [1]
return [1] + ([0] * len(UpperCamelCase__ )) + [1, 1] + ([0] * len(UpperCamelCase__ )) + [1]
def __A ( self : int , UpperCamelCase__ : List[int] , UpperCamelCase__ : Optional[List[int]] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Union[str, Any] = [self.sep_token_id]
SCREAMING_SNAKE_CASE : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def __A ( self : int ):
'''simple docstring'''
return len(self.fairseq_ids_to_tokens )
def __A ( self : Tuple ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = {self.convert_ids_to_tokens(UpperCamelCase__ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __A ( self : List[str] , UpperCamelCase__ : str ):
'''simple docstring'''
return self.sp_model.encode(UpperCamelCase__ , out_type=UpperCamelCase__ )
def __A ( self : Optional[int] , UpperCamelCase__ : Optional[int] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
else:
return self.unk_token_id
def __A ( self : str , UpperCamelCase__ : List[Any] ):
'''simple docstring'''
return self.fairseq_ids_to_tokens[index]
def __A ( self : Any , UpperCamelCase__ : int ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Tuple = ''''''.join(UpperCamelCase__ ).replace(UpperCamelCase__ , ''' ''' ).strip()
return out_string
def __A ( self : Optional[Any] , UpperCamelCase__ : str , UpperCamelCase__ : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(UpperCamelCase__ ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
SCREAMING_SNAKE_CASE : List[Any] = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE : Dict = os.path.join(
UpperCamelCase__ , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''monolingual_vocab_file'''] , )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCamelCase__ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.vocab_file ):
with open(UpperCamelCase__ , '''wb''' ) as fi:
SCREAMING_SNAKE_CASE : Any = self.sp_model.serialized_model_proto()
fi.write(UpperCamelCase__ )
if os.path.abspath(self.monolingual_vocab_file ) != os.path.abspath(
UpperCamelCase__ ) and os.path.isfile(self.monolingual_vocab_file ):
copyfile(self.monolingual_vocab_file , UpperCamelCase__ )
elif not os.path.isfile(self.monolingual_vocab_file ):
with open(UpperCamelCase__ , '''w''' , encoding='''utf-8''' ) as fp:
for token in self.fairseq_tokens_to_ids:
if token not in self.all_special_tokens:
fp.write(f"""{str(UpperCamelCase__ )} \n""" )
return out_vocab_file, out_monolingual_vocab_file
| 710 |
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
__UpperCamelCase : int = logging.get_logger(__name__)
def get_resize_output_image_size(input_image, output_size, keep_aspect_ratio, multiple):
    def constraint_to_multiple_of(val, multiple, min_val=0, max_val=None):
        x = round(val / multiple) * multiple
        if max_val is not None and x > max_val:
            x = math.floor(val / multiple) * multiple
        if x < min_val:
            x = math.ceil(val / multiple) * multiple
        return x

    output_size = (output_size, output_size) if isinstance(output_size, int) else output_size
    input_height, input_width = get_image_size(input_image)
    output_height, output_width = output_size
    # determine new height and width
    scale_height = output_height / input_height
    scale_width = output_width / input_width
    if keep_aspect_ratio:
        # scale as little as possible
        if abs(1 - scale_width) < abs(1 - scale_height):
            # fit width
            scale_height = scale_width
        else:
            # fit height
            scale_width = scale_height
    new_height = constraint_to_multiple_of(scale_height * input_height, multiple=multiple)
    new_width = constraint_to_multiple_of(scale_width * input_width, multiple=multiple)
    return (new_height, new_width)
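# Worked example (illustrative numbers, not from the original file): for an
# input of height 480 and width 640 resized toward (384, 384) with
# keep_aspect_ratio=True and multiple=32, the candidate scales are
# 384/480 = 0.8 and 384/640 = 0.6; 0.8 is closer to 1, so both dimensions use
# 0.8, and rounding 384 and 512 to multiples of 32 leaves them unchanged:
# the output size is (384, 512).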
class lowercase__ ( UpperCamelCase_):
UpperCamelCase_ = ["""pixel_values"""]
def __init__( self : int , UpperCamelCase__ : bool = True , UpperCamelCase__ : Dict[str, int] = None , UpperCamelCase__ : PILImageResampling = PILImageResampling.BILINEAR , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Union[int, float] = 1 / 255 , UpperCamelCase__ : bool = True , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
super().__init__(**UpperCamelCase__ )
SCREAMING_SNAKE_CASE : List[str] = size if size is not None else {'''height''': 384, '''width''': 384}
SCREAMING_SNAKE_CASE : Any = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = do_resize
SCREAMING_SNAKE_CASE : Any = size
SCREAMING_SNAKE_CASE : str = keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of
SCREAMING_SNAKE_CASE : int = resample
SCREAMING_SNAKE_CASE : Any = do_rescale
SCREAMING_SNAKE_CASE : List[Any] = rescale_factor
SCREAMING_SNAKE_CASE : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE : str = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
SCREAMING_SNAKE_CASE : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __A ( self : Optional[Any] , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Dict[str, int] , UpperCamelCase__ : bool = False , UpperCamelCase__ : int = 1 , UpperCamelCase__ : PILImageResampling = PILImageResampling.BICUBIC , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : Union[str, Any] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : List[Any] = get_size_dict(UpperCamelCase__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""" )
SCREAMING_SNAKE_CASE : Any = get_resize_output_image_size(
UpperCamelCase__ , output_size=(size['''height'''], size['''width''']) , keep_aspect_ratio=UpperCamelCase__ , multiple=UpperCamelCase__ , )
return resize(UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Dict , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[int, float] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : str , ):
'''simple docstring'''
return rescale(UpperCamelCase__ , scale=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Any , UpperCamelCase__ : np.ndarray , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Union[float, List[float]] , UpperCamelCase__ : Optional[Union[str, ChannelDimension]] = None , **UpperCamelCase__ : List[str] , ):
'''simple docstring'''
return normalize(UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ , data_format=UpperCamelCase__ , **UpperCamelCase__ )
def __A ( self : Optional[Any] , UpperCamelCase__ : ImageInput , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : int = None , UpperCamelCase__ : PILImageResampling = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : float = None , UpperCamelCase__ : bool = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[float, List[float]]] = None , UpperCamelCase__ : Optional[Union[str, TensorType]] = None , UpperCamelCase__ : ChannelDimension = ChannelDimension.FIRST , **UpperCamelCase__ : Optional[int] , ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Any = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE : Optional[Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE : Union[str, Any] = get_size_dict(UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Optional[Any] = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
SCREAMING_SNAKE_CASE : List[str] = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
SCREAMING_SNAKE_CASE : Tuple = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE : Union[str, Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE : str = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE : List[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE : List[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE : Dict = make_list_of_images(UpperCamelCase__ )
if not valid_images(UpperCamelCase__ ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None or resample is None:
raise ValueError('''Size and resample must be specified if do_resize is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE : Tuple = [to_numpy_array(UpperCamelCase__ ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE : Dict = [self.resize(image=UpperCamelCase__ , size=UpperCamelCase__ , resample=UpperCamelCase__ ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE : Any = [self.rescale(image=UpperCamelCase__ , scale=UpperCamelCase__ ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE : Any = [self.normalize(image=UpperCamelCase__ , mean=UpperCamelCase__ , std=UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Optional[int] = [to_channel_dimension_format(UpperCamelCase__ , UpperCamelCase__ ) for image in images]
SCREAMING_SNAKE_CASE : Tuple = {'''pixel_values''': images}
return BatchFeature(data=UpperCamelCase__ , tensor_type=UpperCamelCase__ )
def __A ( self : Tuple , UpperCamelCase__ : Union[str, Any] , UpperCamelCase__ : List[Tuple] = None ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : Dict = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(UpperCamelCase__ ) != len(UpperCamelCase__ ):
raise ValueError(
'''Make sure that you pass in as many target sizes as the batch dimension of the logits''' )
if is_torch_tensor(UpperCamelCase__ ):
SCREAMING_SNAKE_CASE : List[Any] = target_sizes.numpy()
SCREAMING_SNAKE_CASE : Optional[int] = []
for idx in range(len(UpperCamelCase__ ) ):
SCREAMING_SNAKE_CASE : List[str] = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode='''bilinear''' , align_corners=UpperCamelCase__ )
SCREAMING_SNAKE_CASE : Dict = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(UpperCamelCase__ )
else:
SCREAMING_SNAKE_CASE : List[Any] = logits.argmax(dim=1 )
SCREAMING_SNAKE_CASE : List[Any] = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 34 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = '''bert-generation'''

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ) -> None:
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
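# Minimal usage sketch (illustrative; assumes the class above behaves like
# transformers.BertGenerationConfig):
# config = BertGenerationConfig(num_hidden_layers=12)
# assert config.model_type == "bert-generation" and config.num_hidden_layers == 12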
| 314 |
'''simple docstring'''
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main() -> None:
    parser = ArgumentParser("""Transformers CLI tool""" , usage="""transformers-cli <command> [<args>]""" )
    commands_parser = parser.add_subparsers(help="""transformers-cli command helpers""" )
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    DownloadCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    RunCommand.register_subcommand(commands_parser )
    ServeCommand.register_subcommand(commands_parser )
    UserCommands.register_subcommand(commands_parser )
    AddNewModelCommand.register_subcommand(commands_parser )
    AddNewModelLikeCommand.register_subcommand(commands_parser )
    LfsCommands.register_subcommand(commands_parser )
    PTtoTFCommand.register_subcommand(commands_parser )
    # Let's go
    args = parser.parse_args()
    if not hasattr(args , """func""" ):
        parser.print_help()
        exit(1 )
    # Run
    service = args.func(args )
    service.run()
if __name__ == "__main__":
main()
| 314 | 1 |
class CircularQueue:
    """Circular FIFO queue backed by a fixed-size array."""

    def __init__(self, n: int):
        self.n = n
        self.array = [None] * self.n
        self.front = 0  # index of the first element
        self.rear = 0
        self.size = 0

    def __len__(self) -> int:
        return self.size

    def is_empty(self) -> bool:
        return self.size == 0

    def first(self):
        return False if self.is_empty() else self.array[self.front]

    def enqueue(self, data):
        if self.size >= self.n:
            raise Exception('''QUEUE IS FULL''' )
        self.array[self.rear] = data
        self.rear = (self.rear + 1) % self.n
        self.size += 1
        return self

    def dequeue(self):
        if self.size == 0:
            raise Exception('''UNDERFLOW''' )
        temp = self.array[self.front]
        self.array[self.front] = None
        self.front = (self.front + 1) % self.n
        self.size -= 1
        return temp
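# Quick usage sketch (illustrative, not part of the original snippet):
# queue = CircularQueue(3)
# queue.enqueue(1).enqueue(2)
# assert len(queue) == 2 and queue.first() == 1
# assert queue.dequeue() == 1 and queue.first() == 2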
| 705 |
import datasets
from .evaluate import evaluate
lowercase : str = """\
@article{hendrycks2021cuad,
title={CUAD: An Expert-Annotated NLP Dataset for Legal Contract Review},
author={Dan Hendrycks and Collin Burns and Anya Chen and Spencer Ball},
journal={arXiv preprint arXiv:2103.06268},
year={2021}
}
"""
lowercase : Tuple = """
This metric wrap the official scoring script for version 1 of the Contract
Understanding Atticus Dataset (CUAD).
Contract Understanding Atticus Dataset (CUAD) v1 is a corpus of more than 13,000 labels in 510
commercial legal contracts that have been manually labeled to identify 41 categories of important
clauses that lawyers look for when reviewing contracts in connection with corporate transactions.
"""
lowercase : List[str] = """
Computes CUAD scores (EM, F1, AUPR, Precision@80%Recall, and Precision@90%Recall).
Args:
predictions: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair as given in the references (see below)
- 'prediction_text': list of possible texts for the answer, as a list of strings
depending on a threshold on the confidence probability of each prediction.
references: List of question-answers dictionaries with the following key-values:
- 'id': id of the question-answer pair (see above),
- 'answers': a Dict in the CUAD dataset format
{
'text': list of possible texts for the answer, as a list of strings
'answer_start': list of start positions for the answer, as a list of ints
}
Note that answer_start values are not taken into account to compute the metric.
Returns:
'exact_match': Exact match (the normalized answer exactly match the gold answer)
'f1': The F-score of predicted tokens versus the gold answer
'aupr': Area Under the Precision-Recall curve
'prec_at_80_recall': Precision at 80% recall
'prec_at_90_recall': Precision at 90% recall
Examples:
>>> predictions = [{'prediction_text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.'], 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> references = [{'answers': {'answer_start': [143, 49], 'text': ['The seller:', 'The buyer/End-User: Shenzhen LOHAS Supply Chain Management Co., Ltd.']}, 'id': 'LohaCompanyltd_20191209_F-1_EX-10.16_11917878_EX-10.16_Supply Agreement__Parties'}]
>>> cuad_metric = datasets.load_metric(\"cuad\")
>>> results = cuad_metric.compute(predictions=predictions, references=references)
>>> print(results)
{'exact_match': 100.0, 'f1': 100.0, 'aupr': 0.0, 'prec_at_80_recall': 1.0, 'prec_at_90_recall': 1.0}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __A( datasets.Metric ):
    def _info(self):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'''predictions''': {
'''id''': datasets.Value('''string''' ),
'''prediction_text''': datasets.features.Sequence(datasets.Value('''string''' ) ),
},
'''references''': {
'''id''': datasets.Value('''string''' ),
'''answers''': datasets.features.Sequence(
{
'''text''': datasets.Value('''string''' ),
'''answer_start''': datasets.Value('''int32''' ),
} ),
},
} ), codebase_urls=['''https://www.atticusprojectai.org/cuad'''], reference_urls=['''https://www.atticusprojectai.org/cuad'''], )
    def _compute(self, predictions, references):
        """simple docstring"""
        pred_dict = {prediction['''id''']: prediction['''prediction_text'''] for prediction in predictions}
        dataset = [
{
'''paragraphs''': [
{
'''qas''': [
{
'''answers''': [{'''text''': answer_text} for answer_text in ref['''answers''']['''text''']],
'''id''': ref['''id'''],
}
for ref in references
]
}
]
}
]
        score = evaluate(dataset=dataset, predictions=pred_dict )
        return score
| 105 | 0 |
def kinetic_energy(mass: float, velocity: float) -> float:
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative" )
    # KE = (1/2) * m * |v|^2 ; abs() makes the result independent of direction
    return 0.5 * mass * abs(velocity) * abs(velocity)
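# Worked example (illustrative, not from the original file): a 10 kg body
# moving at 5 m/s carries 0.5 * 10 * 5**2 = 125.0 joules.
# kinetic_energy(10, 5)  # -> 125.0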
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 524 |
# Lint as: python3
import os
import re
import urllib.parse
from pathlib import Path
from typing import Callable, List, Optional, Union
from zipfile import ZipFile
from ..utils.file_utils import cached_path, hf_github_url
from ..utils.logging import get_logger
from ..utils.version import Version
lowerCamelCase__ = get_logger(__name__)
class MockDownloadManager :
    dummy_file_name = """dummy_data"""
    datasets_scripts_dir = """datasets"""
    is_streaming = False
    def __init__( self ,dataset_name: str ,config: str ,version: Union[Version, str] ,cache_dir: Optional[str] = None ,use_local_dummy_data: bool = False ,load_existing_dummy_data: bool = True ,download_callbacks: Optional[List[Callable]] = None ,):
        """simple docstring"""
        self.downloaded_size = 0
        self.dataset_name = dataset_name
        self.cache_dir = cache_dir
        self.use_local_dummy_data = use_local_dummy_data
        self.config = config
        # download_callbacks take a single url as input
        self.download_callbacks: List[Callable] = download_callbacks or []
        # if False, it doesn't load existing files and it returns the paths of the dummy files relative
        # to the dummy_data zip file root
        self.load_existing_dummy_data = load_existing_dummy_data
        # TODO(PVP, QL) might need to make this more general
        self.version_name = str(version )
        # to be downloaded
        self._dummy_file = None
        self._bucket_url = None
    @property
    def dummy_file( self ):
        """simple docstring"""
        if self._dummy_file is None:
            self._dummy_file = self.download_dummy_data()
        return self._dummy_file
    @property
    def dummy_data_folder( self ):
        """simple docstring"""
        if self.config is not None:
            # structure is dummy / config_name / version_name
            return os.path.join("dummy" ,self.config.name ,self.version_name )
        # structure is dummy / version_name
        return os.path.join("dummy" ,self.version_name )
    @property
    def dummy_zip_file( self ):
        """simple docstring"""
        return os.path.join(self.dummy_data_folder ,"dummy_data.zip" )
    def download_dummy_data( self ):
        """simple docstring"""
        path_to_dummy_data_dir = (
            self.local_path_to_dummy_data if self.use_local_dummy_data is True else self.github_path_to_dummy_data
        )
        local_path = cached_path(
            path_to_dummy_data_dir ,cache_dir=self.cache_dir ,extract_compressed_file=True ,force_extract=True )
        return os.path.join(local_path ,self.dummy_file_name )
    @property
    def local_path_to_dummy_data( self ):
        """simple docstring"""
        return os.path.join(self.datasets_scripts_dir ,self.dataset_name ,self.dummy_zip_file )
    @property
    def github_path_to_dummy_data( self ):
        """simple docstring"""
        if self._bucket_url is None:
            self._bucket_url = hf_github_url(self.dataset_name ,self.dummy_zip_file.replace(os.sep ,"/" ) )
        return self._bucket_url
    @property
    def manual_dir( self ):
        """simple docstring"""
        if os.path.isdir(self.dummy_file ):
            return self.dummy_file
        # else cut off path to file -> example `xsum`.
        return "/".join(self.dummy_file.replace(os.sep ,"/" ).split("/" )[:-1] )
    def download_and_extract( self ,data_url ,*args ):
        """simple docstring"""
        if self.load_existing_dummy_data:
            # dummy data is downloaded and tested
            dummy_file = self.dummy_file
        else:
            # dummy data cannot be downloaded and only the path to dummy file is returned
            dummy_file = self.dummy_file_name
        # special case when data_url is a dict
        if isinstance(data_url ,dict ):
            return self.create_dummy_data_dict(dummy_file ,data_url )
        elif isinstance(data_url ,(list, tuple) ):
            return self.create_dummy_data_list(dummy_file ,data_url )
        else:
            return self.create_dummy_data_single(dummy_file ,data_url )
    def download( self ,data_url ,*args ):
        """simple docstring"""
        return self.download_and_extract(data_url )
    def download_custom( self ,data_url ,custom_download ):
        """simple docstring"""
        return self.download_and_extract(data_url )
    def extract( self ,path ,*args ,**kwargs ):
        """simple docstring"""
        return path
    def get_recorded_sizes_checksums( self ):
        """simple docstring"""
        return {}
    def create_dummy_data_dict( self ,path_to_dummy_data ,data_url ):
        """simple docstring"""
        dummy_data_dict = {}
        for key, single_urls in data_url.items():
            for download_callback in self.download_callbacks:
                if isinstance(single_urls ,list ):
                    for single_url in single_urls:
                        download_callback(single_url )
                else:
                    single_url = single_urls
                    download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            if isinstance(single_urls ,list ):
                value = [os.path.join(path_to_dummy_data ,urllib.parse.quote_plus(Path(x ).name ) ) for x in single_urls]
            else:
                single_url = single_urls
                value = os.path.join(path_to_dummy_data ,urllib.parse.quote_plus(Path(single_url ).name ) )
            dummy_data_dict[key] = value
        # make sure that values are unique
        if all(isinstance(i ,str ) for i in dummy_data_dict.values() ) and len(set(dummy_data_dict.values() ) ) < len(
            dummy_data_dict.values() ):
            # append key to value to make its name unique
            dummy_data_dict = {key: value + key for key, value in dummy_data_dict.items()}
        return dummy_data_dict
    def create_dummy_data_list( self ,path_to_dummy_data ,data_url ):
        """simple docstring"""
        dummy_data_list = []
        # trick: if there are many shards named like `data.txt-000001-of-00300`, only use the first one
        is_tf_records = all(bool(re.findall("[0-9]{3,}-of-[0-9]{3,}" ,url ) ) for url in data_url )
        is_pubmed_records = all(
            url.startswith("https://ftp.ncbi.nlm.nih.gov/pubmed/baseline/pubmed" ) for url in data_url )
        if data_url and (is_tf_records or is_pubmed_records):
            data_url = [data_url[0]] * len(data_url )
        for single_url in data_url:
            for download_callback in self.download_callbacks:
                download_callback(single_url )
            # we force the name of each key to be the last file / folder name of the url path
            # if the url has arguments, we need to encode them with urllib.parse.quote_plus
            value = os.path.join(path_to_dummy_data ,urllib.parse.quote_plus(single_url.split("/" )[-1] ) )
            dummy_data_list.append(value )
        return dummy_data_list
    def create_dummy_data_single( self ,path_to_dummy_data ,data_url ):
        """simple docstring"""
        for download_callback in self.download_callbacks:
            download_callback(data_url )
        # we force the name of each key to be the last file / folder name of the url path
        # if the url has arguments, we need to encode them with urllib.parse.quote_plus
        value = os.path.join(path_to_dummy_data ,urllib.parse.quote_plus(data_url.split("/" )[-1] ) )
        if os.path.exists(value ) or not self.load_existing_dummy_data:
            return value
        else:
            # Backward compatibility, maybe deprecate at one point.
            # For many datasets with single url calls to dl_manager.download_and_extract,
            # the dummy_data.zip file is actually the zipped downloaded file
            # while now we expected the dummy_data.zip file to be a directory containing
            # the downloaded file.
            return path_to_dummy_data
    def delete_extracted_files( self ):
        """simple docstring"""
        pass
    def manage_extracted_files( self ):
        """simple docstring"""
        pass
    def iter_archive( self ,path ):
        """simple docstring"""
        def _iter_archive_members(path ):
            # this preserves the order of the members inside the ZIP archive
            dummy_parent_path = Path(self.dummy_file ).parent
            relative_path = path.relative_to(dummy_parent_path )
            with ZipFile(self.local_path_to_dummy_data ) as zip_file:
                members = zip_file.namelist()
            for member in members:
                if member.startswith(relative_path.as_posix() ):
                    yield dummy_parent_path.joinpath(member )
        path = Path(path )
        file_paths = _iter_archive_members(path ) if self.use_local_dummy_data else path.rglob("*" )
        for file_path in file_paths:
            if file_path.is_file() and not file_path.name.startswith((".", "__") ):
                yield file_path.relative_to(path ).as_posix(), file_path.open("rb" )
    def iter_files( self ,paths ):
        """simple docstring"""
        if not isinstance(paths ,list ):
            paths = [paths]
        for path in paths:
            if os.path.isfile(path ):
                if os.path.basename(path ).startswith((".", "__") ):
                    return
                yield path
            else:
                for dirpath, dirnames, filenames in os.walk(path ):
                    if os.path.basename(dirpath ).startswith((".", "__") ):
                        continue
                    dirnames.sort()
                    for filename in sorted(filenames ):
                        if filename.startswith((".", "__") ):
                            continue
                        yield os.path.join(dirpath ,filename )
| 524 | 1 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {'Content-Type': 'application/json'}
    response = requests.post(slack_url , json={'text': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            'Request to slack returned an error '
            F'''{response.status_code}, the response is:\n{response.text}'''
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 642 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
a__ : Union[str, Any] = logging.get_logger(__name__)
a__ : Optional[Any] = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig( PretrainedConfig ):
    '''simple docstring'''
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]
def __init__( self : List[Any] , lowerCAmelCase : Optional[int]=25_01_12 , lowerCAmelCase : str=5_12 , lowerCAmelCase : List[Any]=64 , lowerCAmelCase : Optional[int]=10_24 , lowerCAmelCase : Union[str, Any]=8 , lowerCAmelCase : Tuple=None , lowerCAmelCase : Optional[Any]=6 , lowerCAmelCase : int=32 , lowerCAmelCase : int=1_28 , lowerCAmelCase : List[str]=0.1 , lowerCAmelCase : List[str]=1E-6 , lowerCAmelCase : Optional[int]=1.0 , lowerCAmelCase : Optional[Any]="gated-gelu" , lowerCAmelCase : List[Any]=True , lowerCAmelCase : List[str]=True , lowerCAmelCase : List[Any]="T5Tokenizer" , lowerCAmelCase : str=True , lowerCAmelCase : Optional[int]=0 , lowerCAmelCase : Tuple=1 , lowerCAmelCase : Any=0 , **lowerCAmelCase : int , ) -> str:
"""simple docstring"""
super().__init__(
is_encoder_decoder=lowerCAmelCase , tokenizer_class=lowerCAmelCase , tie_word_embeddings=lowerCAmelCase , pad_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
lowercase__ = vocab_size
lowercase__ = d_model
lowercase__ = d_kv
lowercase__ = d_ff
lowercase__ = num_layers
lowercase__ = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
lowercase__ = num_heads
lowercase__ = relative_attention_num_buckets
lowercase__ = relative_attention_max_distance
lowercase__ = dropout_rate
lowercase__ = layer_norm_epsilon
lowercase__ = initializer_factor
lowercase__ = feed_forward_proj
lowercase__ = use_cache
lowercase__ = self.feed_forward_proj.split('-')
lowercase__ = act_info[-1]
lowercase__ = act_info[0] == 'gated'
if len(lowerCAmelCase) > 1 and act_info[0] != "gated" or len(lowerCAmelCase) > 2:
raise ValueError(
f'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '
'\'gated-gelu\' or \'relu\'')
if feed_forward_proj == "gated-gelu":
lowercase__ = 'gelu_new'
@property
def UpperCAmelCase ( self : Union[str, Any]) -> Dict:
"""simple docstring"""
return self.d_model
@property
def UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
return self.num_heads
@property
def UpperCAmelCase ( self : Optional[int]) -> Optional[Any]:
"""simple docstring"""
return self.num_layers
class UMTaOnnxConfig( OnnxSeqaSeqConfigWithPast ):
'''simple docstring'''
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def UpperCAmelCase ( self : Optional[int]) -> Mapping[str, Mapping[int, str]]:
"""simple docstring"""
lowercase__ = {
'input_ids': {0: 'batch', 1: 'encoder_sequence'},
'attention_mask': {0: 'batch', 1: 'encoder_sequence'},
}
if self.use_past:
lowercase__ = 'past_encoder_sequence + sequence'
lowercase__ = {0: 'batch'}
lowercase__ = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
lowercase__ = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase , direction='inputs')
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
return 13
@property
def UpperCAmelCase ( self : Optional[Any]) -> float:
"""simple docstring"""
return 5E-4
| 642 | 1 |
def sylvester(number: int) -> int:
    assert isinstance(number , int ), f"""The input value of [n={number}] is not an integer"""
    if number == 1:
        return 2
    elif number < 1:
        msg = f"""The input value of [n={number}] has to be > 0"""
        raise ValueError(msg )
    else:
        num = sylvester(number - 1 )
        lower = num - 1
        upper = num
        return lower * upper + 1
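# Added note on the recurrence (derived from the code above): lower * upper + 1
# equals (num - 1) * num + 1, i.e. a(n) = a(n-1)^2 - a(n-1) + 1, so the first
# terms of Sylvester's sequence are 2, 3, 7, 43, 1807, ...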
if __name__ == "__main__":
print(f"""The 8th number in Sylvester's sequence: {sylvester(8)}""")
| 248 |
def and_gate(input_1: int, input_2: int) -> int:
    # the gate outputs 1 only when neither input is 0
    return int((input_1, input_2).count(0 ) == 0 )


def test_and_gate() -> None:
assert and_gate(0 , 0 ) == 0
assert and_gate(0 , 1 ) == 0
assert and_gate(1 , 0 ) == 0
assert and_gate(1 , 1 ) == 1
if __name__ == "__main__":
test_and_gate()
print(and_gate(1, 0))
print(and_gate(0, 0))
print(and_gate(0, 1))
print(and_gate(1, 1))
| 248 | 1 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
"google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
# See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMTaConfig ( PretrainedConfig ):
    """simple docstring"""
    model_type = '''umt5'''
    keys_to_ignore_at_inference = ['''past_key_values''']
def __init__( self , lowerCAmelCase__=2_5_0_1_1_2 , lowerCAmelCase__=5_1_2 , lowerCAmelCase__=6_4 , lowerCAmelCase__=1_0_2_4 , lowerCAmelCase__=8 , lowerCAmelCase__=None , lowerCAmelCase__=6 , lowerCAmelCase__=3_2 , lowerCAmelCase__=1_2_8 , lowerCAmelCase__=0.1 , lowerCAmelCase__=1E-6 , lowerCAmelCase__=1.0 , lowerCAmelCase__="gated-gelu" , lowerCAmelCase__=True , lowerCAmelCase__=True , lowerCAmelCase__="T5Tokenizer" , lowerCAmelCase__=True , lowerCAmelCase__=0 , lowerCAmelCase__=1 , lowerCAmelCase__=0 , **lowerCAmelCase__ , ):
super().__init__(
is_encoder_decoder=lowerCAmelCase__ , tokenizer_class=lowerCAmelCase__ , tie_word_embeddings=lowerCAmelCase__ , pad_token_id=lowerCAmelCase__ , eos_token_id=lowerCAmelCase__ , decoder_start_token_id=lowerCAmelCase__ , **lowerCAmelCase__ , )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = d_kv
__SCREAMING_SNAKE_CASE = d_ff
__SCREAMING_SNAKE_CASE = num_layers
__SCREAMING_SNAKE_CASE = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = relative_attention_num_buckets
__SCREAMING_SNAKE_CASE = relative_attention_max_distance
__SCREAMING_SNAKE_CASE = dropout_rate
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = initializer_factor
__SCREAMING_SNAKE_CASE = feed_forward_proj
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = self.feed_forward_proj.split("""-""")
__SCREAMING_SNAKE_CASE = act_info[-1]
__SCREAMING_SNAKE_CASE = act_info[0] == """gated"""
if len(lowerCAmelCase__) > 1 and act_info[0] != "gated" or len(lowerCAmelCase__) > 2:
raise ValueError(
f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
"""Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. """
"""'gated-gelu' or 'relu'""")
if feed_forward_proj == "gated-gelu":
__SCREAMING_SNAKE_CASE = """gelu_new"""
@property
def snake_case_ ( self):
return self.d_model
@property
def snake_case_ ( self):
return self.num_heads
@property
def snake_case_ ( self):
return self.num_layers
class UMTaOnnxConfig ( OnnxSeqaSeqConfigWithPast ):
"""simple docstring"""
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
def snake_case_ ( self):
__SCREAMING_SNAKE_CASE = {
"""input_ids""": {0: """batch""", 1: """encoder_sequence"""},
"""attention_mask""": {0: """batch""", 1: """encoder_sequence"""},
}
if self.use_past:
__SCREAMING_SNAKE_CASE = """past_encoder_sequence + sequence"""
__SCREAMING_SNAKE_CASE = {0: """batch"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """past_decoder_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """decoder_sequence"""}
if self.use_past:
self.fill_with_past_key_values_(lowerCAmelCase__ , direction="""inputs""")
return common_inputs
@property
# Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
def snake_case_ ( self):
return 1_3
@property
def snake_case_ ( self):
return 5E-4
| 713 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
from dataclasses import dataclass
@dataclass
class TreeNode :
    """simple docstring"""
    data: int
    left: TreeNode | None = None
    right: TreeNode | None = None
__magic_name__ = namedtuple("CoinsDistribResult", "moves excess")
def distribute_coins(root: TreeNode | None) -> int:
    if root is None:
        return 0

    # Validation
    def count_nodes(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_nodes(node.left ) + count_nodes(node.right ) + 1

    def count_coins(node: TreeNode | None) -> int:
        if node is None:
            return 0
        return count_coins(node.left ) + count_coins(node.right ) + node.data

    if count_nodes(root ) != count_coins(root ):
        raise ValueError("""The nodes number should be same as the number of coins""" )

    # Main calculation
    def get_distrib(node: TreeNode | None) -> CoinsDistribResult:
        if node is None:
            return CoinsDistribResult(0 , 1 )
        left_distrib_moves , left_distrib_excess = get_distrib(node.left )
        right_distrib_moves , right_distrib_excess = get_distrib(node.right )
        coins_to_left = 1 - left_distrib_excess
        coins_to_right = 1 - right_distrib_excess
        distrib_moves = (
            left_distrib_moves
            + right_distrib_moves
            + abs(coins_to_left )
            + abs(coins_to_right )
        )
        distrib_excess = node.data - coins_to_left - coins_to_right
        return CoinsDistribResult(distrib_moves , distrib_excess )

    return get_distrib(root )[0]
if __name__ == "__main__":
import doctest
doctest.testmod()
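
# --- Added example (not part of the original file) ---
# Sanity check for the function above: a root holding three coins with two
# empty children needs exactly two moves (one coin pushed to each child).
_example_root = TreeNode(3, TreeNode(0), TreeNode(0))
assert distribute_coins(_example_root) == 2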
| 248 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase__ : Any ={
'configuration_time_series_transformer': [
'TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TimeSeriesTransformerConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase__ : Optional[int] =[
'TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TimeSeriesTransformerForPrediction',
'TimeSeriesTransformerModel',
'TimeSeriesTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
lowerCAmelCase__ : Optional[Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 101 |
"""simple docstring"""
from importlib import import_module
from .logging import get_logger
logger = get_logger(__name__)


class _PatchedModuleObj:
    def __init__(self, module, attrs=None):
        attrs = attrs or []
        if module is not None:
            for key in module.__dict__:
                if key in attrs or not key.startswith("__"):
                    setattr(self, key, getattr(module, key))
        self._original_module = module._original_module if isinstance(module, _PatchedModuleObj) else module


class patch_submodule:
    _active_patches = []

    def __init__(self, obj, target: str, new, attrs=None):
        self.obj = obj
        self.target = target
        self.new = new
        self.key = target.split(".")[0]
        self.original = {}
        self.attrs = attrs or []

    def __enter__(self):
        *submodules, target_attr = self.target.split(".")

        # Patch modules:
        # it's used to patch attributes of submodules like "os.path.join";
        # in this case we need to patch "os" and "os.path"
        for i in range(len(submodules)):
            try:
                submodule = import_module(".".join(submodules[: i + 1]))
            except ModuleNotFoundError:
                continue
            # We iterate over all the globals in self.obj in case we find "os" or "os.path"
            for attr in self.obj.__dir__():
                obj_attr = getattr(self.obj, attr)
                # We don't check for the name of the global, but rather if its value *is* "os" or "os.path".
                # This allows to patch renamed modules like "from os import path as ospath".
                if obj_attr is submodule or (
                    isinstance(obj_attr, _PatchedModuleObj) and obj_attr._original_module is submodule
                ):
                    self.original[attr] = obj_attr
                    # patch at top level
                    setattr(self.obj, attr, _PatchedModuleObj(obj_attr, attrs=self.attrs))
                    patched = getattr(self.obj, attr)
                    # construct lower levels patches
                    for key in submodules[i + 1 :]:
                        setattr(patched, key, _PatchedModuleObj(getattr(patched, key, None), attrs=self.attrs))
                        patched = getattr(patched, key)
                    # finally set the target attribute
                    setattr(patched, target_attr, self.new)

        # Patch attribute itself:
        # it's used for builtins like "open",
        # and also to patch "os.path.join" we may also need to patch "join"
        # itself if it was imported as "from os.path import join".
        if submodules:  # if it's an attribute of a submodule like "os.path.join"
            try:
                attr_value = getattr(import_module(".".join(submodules)), target_attr)
            except (AttributeError, ModuleNotFoundError):
                return
            # We iterate over all the globals in self.obj in case we find "os.path.join"
            for attr in self.obj.__dir__():
                # We don't check for the name of the global, but rather if its value *is* "os.path.join".
                # This allows to patch renamed attributes like "from os.path import join as pjoin".
                if getattr(self.obj, attr) is attr_value:
                    self.original[attr] = getattr(self.obj, attr)
                    setattr(self.obj, attr, self.new)
        elif target_attr in globals()["__builtins__"]:  # if it's a builtin like "open"
            self.original[target_attr] = globals()["__builtins__"][target_attr]
            setattr(self.obj, target_attr, self.new)
        else:
            raise RuntimeError(f"Tried to patch attribute {target_attr} instead of a submodule.")

    def __exit__(self, *exc_info):
        for attr in list(self.original):
            setattr(self.obj, attr, self.original.pop(attr))

    def start(self):
        self.__enter__()
        self._active_patches.append(self)

    def stop(self):
        try:
            self._active_patches.remove(self)
        except ValueError:
            # If the patch hasn't been started this will fail
            return None
        return self.__exit__()
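
# --- Added usage sketch (not part of the original module) ---
# `patch_submodule` temporarily rebinds an attribute such as `os.path.join`
# inside a target module's globals; patching this very module is enough to
# demonstrate the round trip. The replacement lambda is a hypothetical stand-in.
if __name__ == "__main__":
    import os
    import sys

    _this_module = sys.modules[__name__]
    _original_join = os.path.join
    with patch_submodule(_this_module, "os.path.join", lambda *parts: "PATCHED:" + "/".join(parts)):
        assert os.path.join("a", "b") == "PATCHED:a/b"
    assert os.path.join is _original_join  # the original binding is restored on exit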
| 129 | 0 |
import json
import os
import unittest
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES, BioGptTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class BioGptTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BioGptTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "w</w>",
            "r</w>",
            "t</w>",
            "lo",
            "low",
            "er</w>",
            "low</w>",
            "lowest</w>",
            "newer</w>",
            "wider</w>",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["l o 123", "lo w 1456", "e r</w> 1789", ""]

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w") as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, "w") as fp:
            fp.write("\n".join(merges))

    def get_input_output_texts(self, tokenizer):
        input_text = "lower newer"
        output_text = "lower newer"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = BioGptTokenizer(self.vocab_file, self.merges_file)

        text = "lower"
        bpe_tokens = ["low", "er</w>"]
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + ["<unk>"]
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        self.assertTrue(encoded_sentence == [2] + text)
        self.assertTrue(encoded_pair == [2] + text + [2] + text_2)
 | 17 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
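
# --- Added illustration (not part of the original file) ---
# The update rule implemented above is x_{n+1} = x_n - m * f(x_n) / f'(x_n),
# where m is the root's multiplicity. A minimal sympy-free sketch for
# f(x) = x**2 - 2 (so the root is sqrt(2)):
def _newton_sqrt2_sketch(guess: float = 1.0, precision: float = 10**-10) -> float:
    while True:
        next_guess = guess - (guess**2 - 2) / (2 * guess)
        if abs(next_guess - guess) < precision:
            return next_guess
        guess = next_guess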
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(F"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
F"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
F"""{newton_raphson('exp(x) - 1', 10, precision=0.005)}""",
)
# Find root of cos(x)
print(F"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""") | 17 | 1 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def A__ ( UpperCAmelCase_="" ):
_UpperCamelCase : Any = tempfile.mkdtemp()
return os.path.join(UpperCAmelCase_ , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : int ):
'''simple docstring'''
_UpperCamelCase : List[str] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Optional[int] = AgentAudio(lowerCamelCase__ )
_UpperCamelCase : List[str] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
# Ensure that the file contains the same value as the original tensor
_UpperCamelCase , _UpperCamelCase : Union[str, Any] = sf.read(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,torch.tensor(lowerCamelCase__ ) ,atol=1E-4 ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : Optional[Any] = torch.rand(12 ,dtype=torch.floataa ) - 0.5
_UpperCamelCase : Any = get_new_path(suffix='.wav' )
sf.write(lowerCamelCase__ ,lowerCamelCase__ ,16000 )
_UpperCamelCase : List[Any] = AgentAudio(lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type.to_raw() ,atol=1E-4 ) )
self.assertEqual(agent_type.to_string() ,lowerCamelCase__ )
@require_vision
@require_torch
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : List[Any] ):
'''simple docstring'''
_UpperCamelCase : int = torch.randint(0 ,256 ,(64, 64, 3) )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
_UpperCamelCase : List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(lowerCamelCase__ ,agent_type._tensor ,atol=1E-4 ) )
self.assertIsInstance(agent_type.to_raw() ,Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Dict ):
'''simple docstring'''
_UpperCamelCase : str = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Tuple = Image.open(lowerCamelCase__ )
_UpperCamelCase : Optional[Any] = AgentImage(lowerCamelCase__ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
def UpperCamelCase_ ( self : Optional[int] ):
'''simple docstring'''
_UpperCamelCase : List[Any] = Path(get_tests_dir('fixtures/tests_samples/COCO' ) ) / '000000039769.png'
_UpperCamelCase : Union[str, Any] = Image.open(lowerCamelCase__ )
_UpperCamelCase : List[Any] = AgentImage(lowerCamelCase__ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(lowerCamelCase__ ) )
class lowercase__ ( unittest.TestCase ):
def UpperCamelCase_ ( self : Any ):
'''simple docstring'''
_UpperCamelCase : List[Any] = 'Hey!'
_UpperCamelCase : Optional[int] = AgentText(lowerCamelCase__ )
self.assertEqual(lowerCamelCase__ ,agent_type.to_string() )
self.assertEqual(lowerCamelCase__ ,agent_type.to_raw() )
self.assertEqual(lowerCamelCase__ ,lowerCamelCase__ )
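
# --- Added sketch (not part of the original file) ---
# The tests above rely on agent types behaving both like their raw value and
# like a serializable string. A minimal, hypothetical text variant looks like:
class MiniAgentText(str):
    def to_raw(self) -> str:
        return str(self)

    def to_string(self) -> str:
        return str(self)


_t = MiniAgentText("Hey!")
assert _t == "Hey!" and _t.to_raw() == _t.to_string() == "Hey!"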
| 195 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
torch.backends.cuda.matmul.allow_tf32 = False
@skip_mps
class StableDiffusionAttendAndExcitePipelineFastTests(
    PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionAttendAndExcitePipeline
    test_attention_slicing = False
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS.union({"token_indices"})
    image_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=1, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'a cat and a frog',
'token_indices': [2, 5],
'generator': generator,
'num_inference_steps': 1,
'guidance_scale': 6.0,
'output_type': 'numpy',
'max_iter_to_alter': 2,
'thresholds': {0: 0.7},
}
return inputs
    def test_inference(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        self.assertEqual(image.shape, (1, 64, 64, 3))
        expected_slice = np.array(
            [0.63905364, 0.62897307, 0.48599017, 0.5133624, 0.5550048, 0.45769516, 0.50326973, 0.5023139, 0.45384496]
        )
        max_diff = np.abs(image_slice.flatten() - expected_slice).max()
        self.assertLessEqual(max_diff, 1e-3)
    def test_cpu_offload_forward_pass(self):
        super().test_cpu_offload_forward_pass(expected_max_diff=5e-4)

    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(batch_size=2, expected_max_diff=7e-4)

    def test_dict_tuple_outputs_equivalent(self):
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3)

    def test_pt_np_pil_outputs_equivalent(self):
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4)

    def test_save_load_local(self):
        super().test_save_load_local(expected_max_difference=5e-4)

    def test_save_load_optional_components(self):
        super().test_save_load_optional_components(expected_max_difference=4e-4)
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        torch.use_deterministic_algorithms(True)

    @classmethod
    def tearDownClass(cls):
        super().tearDownClass()
        torch.use_deterministic_algorithms(False)

    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_attend_and_excite_fp16(self):
        generator = torch.manual_seed(51)

        pipe = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4", safety_checker=None, torch_dtype=torch.float16
        )
        pipe.to("cuda")

        prompt = "a painting of an elephant with glasses"
        token_indices = [5, 7]

        image = pipe(
            prompt=prompt, token_indices=token_indices, guidance_scale=7.5, generator=generator, num_inference_steps=5, max_iter_to_alter=5, output_type="numpy",
        ).images[0]

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy"
        )
        assert np.abs((expected_image - image).max()) < 5e-1
| 195 | 1 |
import doctest
import glob
import importlib
import inspect
import os
import re
from contextlib import contextmanager
from functools import wraps
from unittest.mock import patch
import numpy as np
import pytest
from absl.testing import parameterized
import datasets
from datasets import load_metric
from .utils import for_all_test_methods, local, slow
# mark all tests as integration
pytestmark = pytest.mark.integration


REQUIRE_FAIRSEQ = {"comet"}
_has_fairseq = importlib.util.find_spec("fairseq") is not None

UNSUPPORTED_ON_WINDOWS = {"code_eval"}
_on_windows = os.name == "nt"

REQUIRE_TRANSFORMERS = {"bertscore", "frugalscore", "perplexity"}
_has_transformers = importlib.util.find_spec("transformers") is not None
def skip_if_metric_requires_fairseq(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_fairseq and metric_name in REQUIRE_FAIRSEQ:
            self.skipTest('"test requires Fairseq"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_if_metric_requires_transformers(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if not _has_transformers and metric_name in REQUIRE_TRANSFORMERS:
            self.skipTest('"test requires transformers"')
        else:
            test_case(self, metric_name)

    return wrapper


def skip_on_windows(test_case):
    @wraps(test_case)
    def wrapper(self, metric_name):
        if _on_windows and metric_name in UNSUPPORTED_ON_WINDOWS:
            self.skipTest('"test not supported on Windows"')
        else:
            test_case(self, metric_name)

    return wrapper
def get_local_metric_names():
    metrics = [metric_dir.split(os.sep)[-2] for metric_dir in glob.glob("./metrics/*/")]
    return [{"testcase_name": x, "metric_name": x} for x in metrics if x != "gleu"]  # gleu is unfinished


@parameterized.named_parameters(get_local_metric_names())
@for_all_test_methods(
    skip_if_metric_requires_fairseq, skip_if_metric_requires_transformers, skip_on_windows
)
@local
class LocalMetricTest(parameterized.TestCase):
    INTENSIVE_CALLS_PATCHER = {}
    metric_name = None
    @pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning")
    @pytest.mark.filterwarnings("ignore:load_metric is deprecated:FutureWarning")
    def test_load_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        metric = datasets.load.import_main_class(metric_module.__name__, dataset=False)
        # check parameters
        parameters = inspect.signature(metric._compute).parameters
        self.assertTrue(all(p.kind != p.VAR_KEYWORD for p in parameters.values()))  # no **kwargs
        # run doctest
        with self.patch_intensive_calls(metric_name, metric_module.__name__):
            with self.use_local_metrics():
                try:
                    results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
                except doctest.UnexpectedException as e:
                    raise e.exc_info[1]  # raise the exception that doctest caught
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @slow
    def test_load_real_metric(self, metric_name):
        doctest.ELLIPSIS_MARKER = "[...]"
        metric_module = importlib.import_module(
            datasets.load.metric_module_factory(os.path.join("metrics", metric_name)).module_path
        )
        # run doctest
        with self.use_local_metrics():
            results = doctest.testmod(metric_module, verbose=True, raise_on_error=True)
        self.assertEqual(results.failed, 0)
        self.assertGreater(results.attempted, 1)

    @contextmanager
    def patch_intensive_calls(self, metric_name, module_name):
        if metric_name in self.INTENSIVE_CALLS_PATCHER:
            with self.INTENSIVE_CALLS_PATCHER[metric_name](module_name):
                yield
        else:
            yield

    @contextmanager
    def use_local_metrics(self):
        def load_local_metric(metric_name, *args, **kwargs):
            return load_metric(os.path.join("metrics", metric_name), *args, **kwargs)

        with patch("datasets.load_metric") as mock_load_metric:
            mock_load_metric.side_effect = load_local_metric
            yield

    @classmethod
    def register_intensive_calls_patcher(cls, metric_name):
        def wrapper(patcher):
            patcher = contextmanager(patcher)
            cls.INTENSIVE_CALLS_PATCHER[metric_name] = patcher
            return patcher

        return wrapper
@LocalMetricTest.register_intensive_calls_patcher("""bleurt""" )
def _UpperCAmelCase ( a : Dict ):
import tensorflow.compat.va as tf
from bleurt.score import Predictor
tf.flags.DEFINE_string("""sv""" , """""" , """""" ) # handle pytest cli flags
class _lowerCAmelCase ( lowercase_ ):
"""simple docstring"""
def __magic_name__ ( self : Any , UpperCamelCase__ : Dict):
'''simple docstring'''
assert len(input_dict["""input_ids"""]) == 2
return np.array([1.03, 1.04])
# mock predict_fn which is supposed to do a forward pass with a bleurt model
with patch("""bleurt.score._create_predictor""" ) as mock_create_predictor:
snake_case__ = MockedPredictor()
yield
@LocalMetricTest.register_intensive_calls_patcher("""bertscore""" )
def _UpperCAmelCase ( a : str ):
import torch
def bert_cos_score_idf(a : Optional[Any] , a : int , *a : Dict , **a : Any ):
return torch.tensor([[1.0, 1.0, 1.0]] * len(a ) )
# mock get_model which is supposed to do download a bert model
# mock bert_cos_score_idf which is supposed to do a forward pass with a bert model
with patch("""bert_score.scorer.get_model""" ), patch(
"""bert_score.scorer.bert_cos_score_idf""" ) as mock_bert_cos_score_idf:
snake_case__ = bert_cos_score_idf
yield
@LocalMetricTest.register_intensive_calls_patcher("""comet""" )
def _UpperCAmelCase ( a : Optional[Any] ):
def load_from_checkpoint(a : int ):
class _lowerCAmelCase :
"""simple docstring"""
def __magic_name__ ( self : Union[str, Any] , UpperCamelCase__ : Optional[int] , *UpperCamelCase__ : Optional[Any] , **UpperCamelCase__ : str):
'''simple docstring'''
assert len(UpperCamelCase__) == 2
snake_case__ = [0.19, 0.92]
return scores, sum(UpperCamelCase__) / len(UpperCamelCase__)
return Model()
# mock load_from_checkpoint which is supposed to do download a bert model
# mock load_from_checkpoint which is supposed to do download a bert model
with patch("""comet.download_model""" ) as mock_download_model:
snake_case__ = None
with patch("""comet.load_from_checkpoint""" ) as mock_load_from_checkpoint:
snake_case__ = load_from_checkpoint
yield
def test_seqeval_raises_when_incorrect_scheme():
    metric = load_metric(os.path.join("metrics", "seqeval"))
    wrong_scheme = "ERROR"
    error_message = f"Scheme should be one of [IOB1, IOB2, IOE1, IOE2, IOBES, BILOU], got {wrong_scheme}"
    with pytest.raises(ValueError, match=re.escape(error_message)):
        metric.compute(predictions=[], references=[], scheme=wrong_scheme)
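
# --- Added sketch (not part of the original file) ---
# `register_intensive_calls_patcher` above is a registry-decorator pattern: a
# classmethod stores one context manager per metric name so the test can swap
# in a cheap fake for each expensive model call. A minimal, generic version:
from contextlib import contextmanager as _contextmanager

_PATCHERS: dict = {}


def register_patcher(name):
    def wrapper(func):
        _PATCHERS[name] = _contextmanager(func)
        return func

    return wrapper


@register_patcher("demo")
def _noop_patch():
    yield  # a real patcher would monkeypatch something here


with _PATCHERS["demo"]():
    pass  # expensive calls would be faked inside this block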
| 99 |
import unittest
import torch
from diffusers import VQModel
from diffusers.utils import floats_tensor, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class VQModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = VQModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)

        return {"sample": image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64],
            "in_channels": 3,
            "out_channels": 3,
            "down_block_types": ["DownEncoderBlock2D", "DownEncoderBlock2D"],
            "up_block_types": ["UpDecoderBlock2D", "UpDecoderBlock2D"],
            "latent_channels": 3,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass

    def test_from_pretrained_hub(self):
        model, loading_info = VQModel.from_pretrained("fusing/vqgan-dummy", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, "Make sure output is not None"

    def test_output_pretrained(self):
        model = VQModel.from_pretrained("fusing/vqgan-dummy")
        model.to(torch_device).eval()

        torch.manual_seed(0)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(0)

        image = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0153, -0.4044, -0.1880, -0.5161, -0.2418, -0.4072, -0.1612, -0.0633, -0.0143])
        # fmt: on
        self.assertTrue(torch.allclose(output_slice, expected_output_slice, atol=1e-3))
| 99 | 1 |
import unittest
from diffusers.models.unet_ad_blocks import * # noqa F403
from diffusers.utils import torch_device
from .test_unet_blocks_common import UNetBlockTesterMixin

# The test classes below reference their mixin base as `_lowerCamelCase`; bind it
# here so the class definitions resolve.
_lowerCamelCase = UNetBlockTesterMixin
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = DownBlockaD # noqa F405
a = '''down'''
def _lowerCamelCase ( self ):
A_ : Tuple = [-0.0232, -0.9869, 0.8054, -0.0637, -0.1688, -1.4264, 0.4470, -1.3394, 0.0904]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = ResnetDownsampleBlockaD # noqa F405
a = '''down'''
def _lowerCamelCase ( self ):
A_ : Tuple = [0.0710, 0.2410, -0.7320, -1.0757, -1.1343, 0.3540, -0.0133, -0.2576, 0.0948]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnDownBlockaD # noqa F405
a = '''down'''
def _lowerCamelCase ( self ):
A_ : Union[str, Any] = [0.0636, 0.8964, -0.6234, -1.0131, 0.0844, 0.4935, 0.3437, 0.0911, -0.2957]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = CrossAttnDownBlockaD # noqa F405
a = '''down'''
def _lowerCamelCase ( self ):
A_ , A_ : Tuple = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Union[str, Any] = [0.2238, -0.7396, -0.2255, -0.3829, 0.1925, 1.1665, 0.0603, -0.7295, 0.1983]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = SimpleCrossAttnDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ , A_ : Union[str, Any] = super().prepare_init_args_and_inputs_for_common()
A_ : List[str] = 32
return init_dict, inputs_dict
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCamelCase ( self ):
A_ : Optional[Any] = [0.7921, -0.0992, -0.1962, -0.7695, -0.4242, 0.7804, 0.4737, 0.2765, 0.3338]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = SkipDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : Optional[int] = [-0.0845, -0.2087, -0.2465, 0.0971, 0.1900, -0.0484, 0.2664, 0.4179, 0.5069]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnSkipDownBlockaD # noqa F405
a = '''down'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_skip_sample=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : Any = [0.5539, 0.1609, 0.4924, 0.0537, -0.1995, 0.4050, 0.0979, -0.2721, -0.0642]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = DownEncoderBlockaD # noqa F405
a = '''down'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : Any = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Tuple = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Optional[Any] = [1.1102, 0.5302, 0.4872, -0.0023, -0.8042, 0.0483, -0.3489, -0.5632, 0.7626]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnDownEncoderBlockaD # noqa F405
a = '''down'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : Optional[int] = {
"""in_channels""": 32,
"""out_channels""": 32,
}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Dict = [0.8966, -0.1486, 0.8568, 0.8141, -0.9046, -0.1342, -0.0972, -0.7417, 0.1538]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = UNetMidBlockaD # noqa F405
a = '''mid'''
def _lowerCamelCase ( self ):
A_ : str = {
"""in_channels""": 32,
"""temb_channels""": 128,
}
A_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : int = [-0.1062, 1.7248, 0.3494, 1.4569, -0.0910, -1.2421, -0.9984, 0.6736, 1.0028]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = UNetMidBlockaDCrossAttn # noqa F405
a = '''mid'''
def _lowerCamelCase ( self ):
A_ , A_ : Optional[Any] = super().prepare_init_args_and_inputs_for_common()
A_ : str = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Union[str, Any] = [0.0187, 2.4220, 0.4484, 1.1203, -0.6121, -1.5122, -0.8270, 0.7851, 1.8335]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = UNetMidBlockaDSimpleCrossAttn # noqa F405
a = '''mid'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ , A_ : int = super().prepare_init_args_and_inputs_for_common()
A_ : str = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : List[Any] = [0.7143, 1.9974, 0.5448, 1.3977, 0.1282, -1.1237, -1.4238, 0.5530, 0.8880]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = UpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : List[str] = [-0.2041, -0.4165, -0.3022, 0.0041, -0.6628, -0.7053, 0.1928, -0.0325, 0.0523]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = ResnetUpsampleBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : List[str] = [0.2287, 0.3549, -0.1346, 0.4797, -0.1715, -0.9649, 0.7305, -0.5864, -0.6244]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = CrossAttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ , A_ : Optional[int] = super().prepare_init_args_and_inputs_for_common()
A_ : Optional[Any] = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Dict = [-0.1403, -0.3515, -0.0420, -0.1425, 0.3167, 0.5094, -0.2181, 0.5931, 0.5582]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = SimpleCrossAttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE , include_encoder_hidden_states=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ , A_ : List[Any] = super().prepare_init_args_and_inputs_for_common()
A_ : List[Any] = 32
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Tuple = [0.2645, 0.1480, 0.0909, 0.8044, -0.9758, -0.9083, 0.0994, -1.1453, -0.7402]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
@unittest.skipIf(torch_device == """mps""" , """MPS result is not consistent""" )
def _lowerCamelCase ( self ):
A_ : List[str] = [0.0979, 0.1326, 0.0021, 0.0659, 0.2249, 0.0059, 0.1132, 0.5952, 0.1033]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = SkipUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : int = [-0.0893, -0.1234, -0.1506, -0.0332, 0.0123, -0.0211, 0.0566, 0.0143, 0.0362]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnSkipUpBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_res_hidden_states_tuple=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : Dict = [0.0361, 0.0617, 0.2787, -0.0350, 0.0342, 0.3421, -0.0843, 0.0913, 0.3015]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = UpDecoderBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : List[str] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Union[str, Any] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : int = [0.4404, 0.1998, -0.9886, -0.3320, -0.3128, -0.7034, -0.6955, -0.2338, -0.3137]
super().test_output(__SCREAMING_SNAKE_CASE )
class _UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
a = AttnUpDecoderBlockaD # noqa F405
a = '''up'''
@property
def _lowerCamelCase ( self ):
return super().get_dummy_input(include_temb=__SCREAMING_SNAKE_CASE )
def _lowerCamelCase ( self ):
A_ : List[Any] = {"""in_channels""": 32, """out_channels""": 32}
A_ : Optional[int] = self.dummy_input
return init_dict, inputs_dict
def _lowerCamelCase ( self ):
A_ : Tuple = [0.6738, 0.4491, 0.1055, 1.0710, 0.7316, 0.3339, 0.3352, 0.1023, 0.3568]
super().test_output(__SCREAMING_SNAKE_CASE )
| 569 |
"""simple docstring"""
lowerCAmelCase__ ="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
def _a ( ) -> None:
__SCREAMING_SNAKE_CASE = input('''Enter message: ''' )
__SCREAMING_SNAKE_CASE = input('''Enter key [alphanumeric]: ''' )
__SCREAMING_SNAKE_CASE = input('''Encrypt/Decrypt [e/d]: ''' )
if mode.lower().startswith('''e''' ):
__SCREAMING_SNAKE_CASE = '''encrypt'''
__SCREAMING_SNAKE_CASE = encrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
elif mode.lower().startswith('''d''' ):
__SCREAMING_SNAKE_CASE = '''decrypt'''
__SCREAMING_SNAKE_CASE = decrypt_message(UpperCAmelCase__ , UpperCAmelCase__ )
print(f"""\n{mode.title()}ed message:""" )
print(UpperCAmelCase__ )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , '''encrypt''' )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
return translate_message(UpperCAmelCase__ , UpperCAmelCase__ , '''decrypt''' )
def _a ( UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ ) -> str:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = key.upper()
for symbol in message:
__SCREAMING_SNAKE_CASE = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(UpperCAmelCase__ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = 0
else:
translated.append(UpperCAmelCase__ )
return "".join(UpperCAmelCase__ )
if __name__ == "__main__":
main()
| 482 | 0 |
from __future__ import annotations
import json
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
_lowerCamelCase ={"UserAgent": UserAgent().random}
def snake_case__ ( lowerCAmelCase_ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE =script.contents[0]
SCREAMING_SNAKE_CASE =json.loads(data[data.find('{"config"' ) : -1] )
return info["entry_data"]["ProfilePage"][0]["graphql"]["user"]
class a_ :
"""simple docstring"""
def __init__( self : Dict ,snake_case : List[str] ):
SCREAMING_SNAKE_CASE =f'https://www.instagram.com/{username}/'
SCREAMING_SNAKE_CASE =self.get_json()
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE =requests.get(self.url ,headers=snake_case ).text
SCREAMING_SNAKE_CASE =BeautifulSoup(snake_case ,'html.parser' ).find_all('script' )
try:
return extract_user_profile(scripts[4] )
except (json.decoder.JSONDecodeError, KeyError):
return extract_user_profile(scripts[3] )
def __repr__( self : List[str] ):
return f'{self.__class__.__name__}(\'{self.username}\')'
def __str__( self : Union[str, Any] ):
return f'{self.fullname} ({self.username}) is {self.biography}'
@property
def _lowerCAmelCase ( self : Any ):
return self.user_data["username"]
@property
def _lowerCAmelCase ( self : Tuple ):
return self.user_data["full_name"]
@property
def _lowerCAmelCase ( self : Dict ):
return self.user_data["biography"]
@property
def _lowerCAmelCase ( self : Tuple ):
return self.user_data["business_email"]
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
return self.user_data["external_url"]
@property
def _lowerCAmelCase ( self : Optional[Any] ):
return self.user_data["edge_followed_by"]["count"]
@property
def _lowerCAmelCase ( self : int ):
return self.user_data["edge_follow"]["count"]
@property
def _lowerCAmelCase ( self : Union[str, Any] ):
return self.user_data["edge_owner_to_timeline_media"]["count"]
@property
def _lowerCAmelCase ( self : Optional[Any] ):
return self.user_data["profile_pic_url_hd"]
@property
def _lowerCAmelCase ( self : Any ):
return self.user_data["is_verified"]
@property
def _lowerCAmelCase ( self : int ):
return self.user_data["is_private"]
def snake_case__ ( lowerCAmelCase_ = "github" ):
"""simple docstring"""
import os
if os.environ.get('CI' ):
return # test failing on GitHub Actions
SCREAMING_SNAKE_CASE =InstagramUser(lowerCAmelCase_ )
assert instagram_user.user_data
assert isinstance(instagram_user.user_data, lowerCAmelCase_ )
assert instagram_user.username == username
if username != "github":
return
assert instagram_user.fullname == "GitHub"
assert instagram_user.biography == "Built for developers."
assert instagram_user.number_of_posts > 150
assert instagram_user.number_of_followers > 120000
assert instagram_user.number_of_followings > 15
assert instagram_user.email == "support@github.com"
assert instagram_user.website == "https://github.com/readme"
assert instagram_user.profile_picture_url.startswith('https://instagram.' )
assert instagram_user.is_verified is True
assert instagram_user.is_private is False
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowerCamelCase =InstagramUser("github")
print(instagram_user)
print(f'{instagram_user.number_of_posts = }')
print(f'{instagram_user.number_of_followers = }')
print(f'{instagram_user.number_of_followings = }')
print(f'{instagram_user.email = }')
print(f'{instagram_user.website = }')
print(f'{instagram_user.profile_picture_url = }')
print(f'{instagram_user.is_verified = }')
print(f'{instagram_user.is_private = }')
| 252 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image

    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNetaDConditionModel(
            block_out_channels=(32, 32, 64), layers_per_block=2, sample_size=32, in_channels=7, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=8, use_linear_projection=True, only_cross_attention=(True, True, False), num_class_embeds=100,
        )
        return model

    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        return CLIPTextModel(config)
def _lowerCAmelCase ( self : Optional[int] ):
SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,return_dict=snake_case ,)[0]
SCREAMING_SNAKE_CASE =image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE =image_from_tuple[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
SCREAMING_SNAKE_CASE =np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowerCAmelCase ( self : Tuple ):
SCREAMING_SNAKE_CASE ='cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
assert image.shape[0] == 2
SCREAMING_SNAKE_CASE =torch.Generator(device=snake_case ).manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != 'cuda' ,'This test requires a GPU' )
def _lowerCAmelCase ( self : List[str] ):
SCREAMING_SNAKE_CASE =self.dummy_cond_unet_upscale
SCREAMING_SNAKE_CASE =DDPMScheduler()
SCREAMING_SNAKE_CASE =DDIMScheduler(prediction_type='v_prediction' )
SCREAMING_SNAKE_CASE =self.dummy_vae
SCREAMING_SNAKE_CASE =self.dummy_text_encoder
SCREAMING_SNAKE_CASE =CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
SCREAMING_SNAKE_CASE =self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
SCREAMING_SNAKE_CASE =Image.fromarray(np.uinta(snake_case ) ).convert('RGB' ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
SCREAMING_SNAKE_CASE =unet.half()
SCREAMING_SNAKE_CASE =text_encoder.half()
# make sure here that pndm scheduler skips prk
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline(
unet=snake_case ,low_res_scheduler=snake_case ,scheduler=snake_case ,vae=snake_case ,text_encoder=snake_case ,tokenizer=snake_case ,max_noise_level=350 ,)
SCREAMING_SNAKE_CASE =sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
SCREAMING_SNAKE_CASE ='A painting of a squirrel eating a burger'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =sd_pipe(
[prompt] ,image=snake_case ,generator=snake_case ,num_inference_steps=2 ,output_type='np' ,).images
SCREAMING_SNAKE_CASE =low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
"""simple docstring"""
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _lowerCAmelCase ( self : Any ):
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat.npy' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _lowerCAmelCase ( self : str ):
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE =load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale'
'/upsampled_cat_fp16.npy' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(
snake_case ,torch_dtype=torch.floataa ,)
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,output_type='np' ,)
SCREAMING_SNAKE_CASE =output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _lowerCAmelCase ( self : List[Any] ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE =load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/sd2-upscale/low_res_cat.png' )
SCREAMING_SNAKE_CASE ='stabilityai/stable-diffusion-x4-upscaler'
SCREAMING_SNAKE_CASE =StableDiffusionUpscalePipeline.from_pretrained(
snake_case ,torch_dtype=torch.floataa ,)
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE ='a cat sitting on a park bench'
SCREAMING_SNAKE_CASE =torch.manual_seed(0 )
SCREAMING_SNAKE_CASE =pipe(
prompt=snake_case ,image=snake_case ,generator=snake_case ,num_inference_steps=5 ,output_type='np' ,)
SCREAMING_SNAKE_CASE =torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 252 | 1 |
def solution(max_n: int = 1000) -> int:
    """
    Returns how many of the first `max_n` expansions of the continued fraction
    for sqrt(2) have a numerator with more digits than the denominator
    (Project Euler 57).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, max_n + 1):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator

    return len(result)
if __name__ == "__main__":
print(F'''{solution() = }''')
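
# --- Added worked example (not part of the original file) ---
# The recurrence above generates the convergents of sqrt(2):
#   3/2, 7/5, 17/12, 41/29, ...  (n_{k+1} = n_k + 2*d_k, d_{k+1} = n_k + d_k).
# The eighth expansion, 1393/985, is the first whose numerator has more digits
# than its denominator, so the first eight expansions contain exactly one hit.
assert solution(8) == 1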
| 315 |
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config
def __lowerCamelCase ( self : Any ) ->Optional[int]:
for timesteps in [1, 5, 1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=A )
def __lowerCamelCase ( self : str ) ->List[str]:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] , [0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A , beta_end=A )
def __lowerCamelCase ( self : Union[str, Any] ) ->str:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A )
def __lowerCamelCase ( self : str ) ->Dict:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A )
def __lowerCamelCase ( self : Dict ) ->str:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A )
def __lowerCamelCase ( self : Tuple ) ->Optional[int]:
self.check_over_configs(thresholding=A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A , prediction_type=A , sample_max_value=A , )
def __lowerCamelCase ( self : Optional[Any] ) ->str:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A )
def __lowerCamelCase ( self : List[str] ) ->Optional[Any]:
for t in [0, 5_0_0, 9_9_9]:
self.check_over_forward(time_step=A )
def __lowerCamelCase ( self : List[Any] ) ->Optional[int]:
lowerCamelCase__ : Tuple = self.scheduler_classes[0]
lowerCamelCase__ : Any = self.get_scheduler_config()
lowerCamelCase__ : Dict = scheduler_class(**A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_8_7 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_9_9 ) - 0.02 ) ) < 1e-5
def __lowerCamelCase ( self : Union[str, Any] ) ->Optional[int]:
lowerCamelCase__ : int = self.scheduler_classes[0]
lowerCamelCase__ : Any = self.get_scheduler_config()
lowerCamelCase__ : Union[str, Any] = scheduler_class(**A )
lowerCamelCase__ : Dict = len(A )
lowerCamelCase__ : Any = self.dummy_model()
lowerCamelCase__ : List[Any] = self.dummy_sample_deter
lowerCamelCase__ : Optional[int] = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
lowerCamelCase__ : Union[str, Any] = model(A , A )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : Any = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase__ : Union[str, Any] = pred_prev_sample
lowerCamelCase__ : Optional[Any] = torch.sum(torch.abs(A ) )
lowerCamelCase__ : Dict = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def __lowerCamelCase ( self : List[Any] ) ->Union[str, Any]:
lowerCamelCase__ : Any = self.scheduler_classes[0]
lowerCamelCase__ : Dict = self.get_scheduler_config(prediction_type='''v_prediction''' )
lowerCamelCase__ : List[str] = scheduler_class(**A )
lowerCamelCase__ : str = len(A )
lowerCamelCase__ : int = self.dummy_model()
lowerCamelCase__ : Any = self.dummy_sample_deter
lowerCamelCase__ : List[str] = torch.manual_seed(0 )
for t in reversed(range(A ) ):
# 1. predict noise residual
lowerCamelCase__ : Tuple = model(A , A )
# 2. predict previous mean of sample x_t-1
lowerCamelCase__ : int = scheduler.step(A , A , A , generator=A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
lowerCamelCase__ : Union[str, Any] = pred_prev_sample
lowerCamelCase__ : Optional[int] = torch.sum(torch.abs(A ) )
lowerCamelCase__ : Optional[Any] = torch.mean(torch.abs(A ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def __lowerCamelCase ( self : int ) ->Tuple:
lowerCamelCase__ : List[Any] = self.scheduler_classes[0]
lowerCamelCase__ : int = self.get_scheduler_config()
lowerCamelCase__ : Tuple = scheduler_class(**A )
lowerCamelCase__ : Optional[Any] = [1_0_0, 8_7, 5_0, 1, 0]
scheduler.set_timesteps(timesteps=A )
lowerCamelCase__ : int = scheduler.timesteps
for i, timestep in enumerate(A ):
if i == len(A ) - 1:
lowerCamelCase__ : Any = -1
else:
lowerCamelCase__ : int = timesteps[i + 1]
lowerCamelCase__ : Optional[int] = scheduler.previous_timestep(A )
lowerCamelCase__ : Optional[int] = prev_t.item()
self.assertEqual(A , A )
def __lowerCamelCase ( self : str ) ->Optional[int]:
lowerCamelCase__ : List[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Any = self.get_scheduler_config()
lowerCamelCase__ : int = scheduler_class(**A )
lowerCamelCase__ : Any = [1_0_0, 8_7, 5_0, 5_1, 0]
with self.assertRaises(A , msg='''`custom_timesteps` must be in descending order.''' ):
scheduler.set_timesteps(timesteps=A )
def __lowerCamelCase ( self : Any ) ->str:
lowerCamelCase__ : Optional[int] = self.scheduler_classes[0]
lowerCamelCase__ : List[Any] = self.get_scheduler_config()
lowerCamelCase__ : Tuple = scheduler_class(**A )
lowerCamelCase__ : List[str] = [1_0_0, 8_7, 5_0, 1, 0]
lowerCamelCase__ : Optional[int] = len(A )
with self.assertRaises(A , msg='''Can only pass one of `num_inference_steps` or `custom_timesteps`.''' ):
scheduler.set_timesteps(num_inference_steps=A , timesteps=A )
def __lowerCamelCase ( self : List[Any] ) ->Dict:
lowerCamelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCamelCase__ : Union[str, Any] = self.get_scheduler_config()
lowerCamelCase__ : str = scheduler_class(**A )
lowerCamelCase__ : Union[str, Any] = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            A , msg=f'''`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}''' , ):
scheduler.set_timesteps(timesteps=A )
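The variance assertions near the top of this test pin down the "fixed_small" option. Below is a standalone sketch of the underlying DDPM posterior variance, assuming the linear beta schedule from the config above; it mirrors what DDPMScheduler._get_variance computes but is not the library code itself:

import torch

def ddpm_fixed_small_variance(t: int, num_train_timesteps: int = 1000,
                              beta_start: float = 1e-4, beta_end: float = 0.02) -> torch.Tensor:
    betas = torch.linspace(beta_start, beta_end, num_train_timesteps)  # "linear" schedule
    alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else torch.tensor(1.0)
    # posterior variance: beta_tilde_t = (1 - abar_{t-1}) / (1 - abar_t) * beta_t
    return (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * betas[t]

print(ddpm_fixed_small_variance(487))  # ~0.00979, matching the assertion above
print(ddpm_fixed_small_variance(999))  # ~0.02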
| 315 | 1 |
'''simple docstring'''
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
a_ = logging.get_logger(__name__)
enable_full_determinism()
class UpperCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCAmelCase_ = UNetaDModel
UpperCAmelCase_ = """sample"""
@property
def snake_case__ ( self):
snake_case_ : Optional[Any] = 4
snake_case_ : int = 3
snake_case_ : Dict = (32, 32)
snake_case_ : str = floats_tensor((batch_size, num_channels) + sizes).to(lowercase_)
snake_case_ : Union[str, Any] = torch.tensor([10]).to(lowercase_)
return {"sample": noise, "timestep": time_step}
@property
def snake_case__ ( self):
return (3, 32, 32)
@property
def snake_case__ ( self):
return (3, 32, 32)
def snake_case__ ( self):
snake_case_ : str = {
"block_out_channels": (32, 64),
"down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
"up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
"attention_head_dim": 3,
"out_channels": 3,
"in_channels": 3,
"layers_per_block": 2,
"sample_size": 32,
}
snake_case_ : List[Any] = self.dummy_input
return init_dict, inputs_dict
class UpperCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCAmelCase_ = UNetaDModel
UpperCAmelCase_ = """sample"""
@property
def snake_case__ ( self):
snake_case_ : List[Any] = 4
snake_case_ : Dict = 4
snake_case_ : Dict = (32, 32)
snake_case_ : Dict = floats_tensor((batch_size, num_channels) + sizes).to(lowercase_)
snake_case_ : Any = torch.tensor([10]).to(lowercase_)
return {"sample": noise, "timestep": time_step}
@property
def snake_case__ ( self):
return (4, 32, 32)
@property
def snake_case__ ( self):
return (4, 32, 32)
def snake_case__ ( self):
snake_case_ : Any = {
"sample_size": 32,
"in_channels": 4,
"out_channels": 4,
"layers_per_block": 2,
"block_out_channels": (32, 64),
"attention_head_dim": 32,
"down_block_types": ("DownBlock2D", "DownBlock2D"),
"up_block_types": ("UpBlock2D", "UpBlock2D"),
}
snake_case_ : Any = self.dummy_input
return init_dict, inputs_dict
def snake_case__ ( self):
snake_case_ , snake_case_ : Tuple = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(lowercase_)
snake_case_ : int = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
def snake_case__ ( self):
snake_case_ , snake_case_ : Optional[Any] = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowercase_)
model.to(lowercase_)
snake_case_ : str = model(**self.dummy_input).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != "cuda" , "This test is supposed to run on GPU")
def snake_case__ ( self):
        # by default, model loading will use accelerate (`low_cpu_mem_usage=True`)
snake_case_ , snake_case_ : Any = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update" , output_loading_info=lowercase_)
model_accelerate.to(lowercase_)
model_accelerate.eval()
snake_case_ : Optional[int] = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0) , )
snake_case_ : List[str] = noise.to(lowercase_)
snake_case_ : Any = torch.tensor([10] * noise.shape[0]).to(lowercase_)
snake_case_ : Optional[int] = model_accelerate(lowercase_ , lowercase_)["sample"]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
snake_case_ , snake_case_ : Union[str, Any] = UNetaDModel.from_pretrained(
"fusing/unet-ldm-dummy-update" , output_loading_info=lowercase_ , low_cpu_mem_usage=lowercase_)
model_normal_load.to(lowercase_)
model_normal_load.eval()
snake_case_ : Optional[Any] = model_normal_load(lowercase_ , lowercase_)["sample"]
assert torch_all_close(lowercase_ , lowercase_ , rtol=1E-3)
def snake_case__ ( self):
snake_case_ : Any = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update")
model.eval()
model.to(lowercase_)
snake_case_ : List[str] = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0) , )
snake_case_ : List[str] = noise.to(lowercase_)
snake_case_ : str = torch.tensor([10] * noise.shape[0]).to(lowercase_)
with torch.no_grad():
snake_case_ : Tuple = model(lowercase_ , lowercase_).sample
snake_case_ : Optional[int] = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
snake_case_ : Tuple = torch.tensor([-13.3_258, -20.1_100, -15.9_873, -17.6_617, -23.0_596, -17.9_419, -13.3_675, -16.1_889, -12.3_800])
# fmt: on
self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-3))
class UpperCAmelCase_ ( snake_case__ , snake_case__ , unittest.TestCase ):
UpperCAmelCase_ = UNetaDModel
UpperCAmelCase_ = """sample"""
@property
def snake_case__ ( self , lowercase_=(32, 32)):
snake_case_ : List[Any] = 4
snake_case_ : str = 3
snake_case_ : str = floats_tensor((batch_size, num_channels) + sizes).to(lowercase_)
snake_case_ : Dict = torch.tensor(batch_size * [10]).to(dtype=torch.intaa , device=lowercase_)
return {"sample": noise, "timestep": time_step}
@property
def snake_case__ ( self):
return (3, 32, 32)
@property
def snake_case__ ( self):
return (3, 32, 32)
def snake_case__ ( self):
snake_case_ : List[str] = {
"block_out_channels": [32, 64, 64, 64],
"in_channels": 3,
"layers_per_block": 1,
"out_channels": 3,
"time_embedding_type": "fourier",
"norm_eps": 1E-6,
"mid_block_scale_factor": math.sqrt(2.0),
"norm_num_groups": None,
"down_block_types": [
"SkipDownBlock2D",
"AttnSkipDownBlock2D",
"SkipDownBlock2D",
"SkipDownBlock2D",
],
"up_block_types": [
"SkipUpBlock2D",
"SkipUpBlock2D",
"AttnSkipUpBlock2D",
"SkipUpBlock2D",
],
}
snake_case_ : Optional[Any] = self.dummy_input
return init_dict, inputs_dict
@slow
def snake_case__ ( self):
snake_case_ , snake_case_ : Optional[int] = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256" , output_loading_info=lowercase_)
self.assertIsNotNone(lowercase_)
self.assertEqual(len(loading_info["missing_keys"]) , 0)
model.to(lowercase_)
snake_case_ : Dict = self.dummy_input
snake_case_ : Tuple = floats_tensor((4, 3) + (2_56, 2_56)).to(lowercase_)
snake_case_ : Tuple = noise
snake_case_ : Tuple = model(**lowercase_)
assert image is not None, "Make sure output is not None"
@slow
def snake_case__ ( self):
snake_case_ : Dict = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
model.to(lowercase_)
snake_case_ : List[Any] = 4
snake_case_ : str = 3
snake_case_ : Dict = (2_56, 2_56)
snake_case_ : Tuple = torch.ones((batch_size, num_channels) + sizes).to(lowercase_)
snake_case_ : List[Any] = torch.tensor(batch_size * [1E-4]).to(lowercase_)
with torch.no_grad():
snake_case_ : int = model(lowercase_ , lowercase_).sample
snake_case_ : List[str] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ : Union[str, Any] = torch.tensor([-4_842.8_691, -6_499.6_631, -3_800.1_953, -7_978.2_686, -10_980.7_129, -20_028.8_535, 8_148.2_822, 2_342.2_905, 567.7_608])
# fmt: on
self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-2))
def snake_case__ ( self):
snake_case_ : List[Any] = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
model.to(lowercase_)
snake_case_ : Dict = 4
snake_case_ : str = 3
snake_case_ : List[Any] = (32, 32)
snake_case_ : int = torch.ones((batch_size, num_channels) + sizes).to(lowercase_)
snake_case_ : List[Any] = torch.tensor(batch_size * [1E-4]).to(lowercase_)
with torch.no_grad():
snake_case_ : Optional[Any] = model(lowercase_ , lowercase_).sample
snake_case_ : List[Any] = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
snake_case_ : Union[str, Any] = torch.tensor([-0.0_325, -0.0_900, -0.0_869, -0.0_332, -0.0_725, -0.0_270, -0.0_101, 0.0_227, 0.0_256])
# fmt: on
self.assertTrue(torch_all_close(lowercase_ , lowercase_ , rtol=1E-2))
def snake_case__ ( self):
# not required for this model
pass
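A de-obfuscated sketch of the smallest dummy setup exercised above ("UNetaDModel" is the dataset's mangling of UNet2DModel); the config values are copied from the first tester class, and device handling is omitted:

import torch
from diffusers import UNet2DModel

model = UNet2DModel(
    sample_size=32,
    in_channels=3,
    out_channels=3,
    layers_per_block=2,
    block_out_channels=(32, 64),
    attention_head_dim=3,
    down_block_types=("DownBlock2D", "AttnDownBlock2D"),
    up_block_types=("AttnUpBlock2D", "UpBlock2D"),
)
noise = torch.randn(4, 3, 32, 32)          # (batch, channels, height, width)
sample = model(noise, torch.tensor([10])).sample
assert sample.shape == noise.shape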
| 92 |
| 92 | 1 |
'''simple docstring'''
class Graph:
    def __init__(self) -> None:
        self.vertex = {}

    def print_graph(self) -> None:
        print(self.vertex)
        for i in self.vertex:
            print(i, " -> ", " -> ".join([str(j) for j in self.vertex[i]]))

    def add_edge(self, from_vertex: int, to_vertex: int) -> None:
        # check if the vertex is already present
        if from_vertex in self.vertex:
            self.vertex[from_vertex].append(to_vertex)
        else:
            # else make a new vertex
            self.vertex[from_vertex] = [to_vertex]

    def dfs(self) -> None:
        visited = [False] * len(self.vertex)
        # call the recursive helper function for each unvisited vertex
        for i in range(len(self.vertex)):
            if not visited[i]:
                self.dfs_recursive(i, visited)

    def dfs_recursive(self, start_vertex: int, visited: list) -> None:
        visited[start_vertex] = True
        print(start_vertex, end=" ")
        # recur for all the vertices that are adjacent to this node
        for i in self.vertex[start_vertex]:
            if not visited[i]:
                self.dfs_recursive(i, visited)
if __name__ == "__main__":
    g = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("DFS:")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
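The recursion above can hit Python's recursion limit on deep graphs; below is a sketch of the same traversal done with an explicit stack (a standard alternative, not part of the original snippet):

def dfs_iterative(graph: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        vertex = stack.pop()
        if vertex in visited:
            continue
        visited.add(vertex)
        order.append(vertex)
        stack.extend(reversed(graph.get(vertex, [])))  # reverse so pops follow insertion order
    return order

print(dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0))  # [0, 1, 2, 3]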
| 440 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class lowercase__ ( __lowerCamelCase ):
'''simple docstring'''
a : Union[List[PIL.Image.Image], np.ndarray]
a : Optional[List[bool]]
a : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 253 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
__UpperCAmelCase = False
__UpperCAmelCase = True
__UpperCAmelCase = False
if __name__ == "__main__":
__UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument(
"""--repo_path""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the architecture.""",
)
parser.add_argument("""--dump_path""", default=None, type=str, required=True, help="""Path to the output model.""")
__UpperCAmelCase = parser.parse_args()
__UpperCAmelCase = {
"""image_size""": """sample_size""",
"""num_res_blocks""": """layers_per_block""",
"""block_channels""": """block_out_channels""",
"""down_blocks""": """down_block_types""",
"""up_blocks""": """up_block_types""",
"""downscale_freq_shift""": """freq_shift""",
"""resnet_num_groups""": """norm_num_groups""",
"""resnet_act_fn""": """act_fn""",
"""resnet_eps""": """norm_eps""",
"""num_head_channels""": """attention_head_dim""",
}
__UpperCAmelCase = {
"""time_steps""": """time_proj""",
"""mid""": """mid_block""",
"""downsample_blocks""": """down_blocks""",
"""upsample_blocks""": """up_blocks""",
}
__UpperCAmelCase = """""" if has_file(args.repo_path, """config.json""") else """unet"""
with open(os.path.join(args.repo_path, subfolder, """config.json"""), """r""", encoding="""utf-8""") as reader:
__UpperCAmelCase = reader.read()
__UpperCAmelCase = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, """config.json"""):
__UpperCAmelCase = UNetaDModel(**config)
else:
__UpperCAmelCase = UNetaDConditionModel if """ldm-text2im-large-256""" in args.repo_path else UNetaDModel
__UpperCAmelCase = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
__UpperCAmelCase = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
__UpperCAmelCase = config[key]
del config[key]
__UpperCAmelCase = [k.replace("""UNetRes""", """""") for k in config["""down_block_types"""]]
__UpperCAmelCase = [k.replace("""UNetRes""", """""") for k in config["""up_block_types"""]]
if do_only_weights:
__UpperCAmelCase = torch.load(os.path.join(args.repo_path, subfolder, """diffusion_pytorch_model.bin"""))
__UpperCAmelCase = {}
for param_key, param_value in state_dict.items():
if param_key.endswith(""".op.bias""") or param_key.endswith(""".op.weight"""):
continue
__UpperCAmelCase = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split(""".""")[0] == key:
__UpperCAmelCase = param_value
__UpperCAmelCase = True
if not has_changed:
__UpperCAmelCase = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
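The config-migration half of the script boils down to a key-rename pass; here is a toy, self-contained illustration of that pattern (dict contents invented for the example):

old_config = {"image_size": 64, "num_res_blocks": 2, "act_fn": "silu"}
renames = {"image_size": "sample_size", "num_res_blocks": "layers_per_block"}
new_config = {renames.get(key, key): value for key, value in old_config.items()}
assert new_config == {"sample_size": 64, "layers_per_block": 2, "act_fn": "silu"}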
| 718 |
def nand_gate(input_1: int, input_2: int) -> int:
    """Return the NAND of two binary inputs: 0 only when both inputs are 1."""
    return int((input_1, input_2).count(0) != 0)


def test_nand_gate() -> None:
    """Exhaustively check the NAND truth table."""
    assert nand_gate(0, 0) == 1
    assert nand_gate(0, 1) == 1
    assert nand_gate(1, 0) == 1
    assert nand_gate(1, 1) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
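NAND is functionally complete, so the gate above suffices to build the other basic gates; a short sketch of the standard constructions (not part of the original snippet):

def not_gate(a: int) -> int:
    return nand_gate(a, a)

def and_gate(a: int, b: int) -> int:
    return not_gate(nand_gate(a, b))

def or_gate(a: int, b: int) -> int:
    return nand_gate(not_gate(a), not_gate(b))

assert [and_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 0, 0, 1]
assert [or_gate(a, b) for a in (0, 1) for b in (0, 1)] == [0, 1, 1, 1]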
| 582 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase_ = logging.get_logger(__name__)
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
snake_case_ = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
('''cls_token''', '''vit.embeddings.cls_token'''),
('''patch_embed.proj.weight''', '''vit.embeddings.patch_embeddings.projection.weight'''),
('''patch_embed.proj.bias''', '''vit.embeddings.patch_embeddings.projection.bias'''),
('''pos_embed''', '''vit.embeddings.position_embeddings'''),
] )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
('''norm.weight''', '''layernorm.weight'''),
('''norm.bias''', '''layernorm.bias'''),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case_ = [(pair[0], pair[1][4:]) if pair[1].startswith('''vit''' ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
('''norm.weight''', '''vit.layernorm.weight'''),
('''norm.bias''', '''vit.layernorm.bias'''),
('''head.weight''', '''classifier.weight'''),
('''head.bias''', '''classifier.bias'''),
] )
return rename_keys
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case_ = ''''''
else:
snake_case_ = '''vit.'''
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case_ = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case_ = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case_ = in_proj_weight[
: config.hidden_size, :
]
snake_case_ = in_proj_bias[: config.hidden_size]
snake_case_ = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case_ = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case_ = in_proj_weight[
-config.hidden_size :, :
]
snake_case_ = in_proj_bias[-config.hidden_size :]
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ ):
snake_case_ = ['''head.weight''', '''head.bias''']
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
snake_case_ = dct.pop(SCREAMING_SNAKE_CASE__ )
snake_case_ = val
def __SCREAMING_SNAKE_CASE ():
snake_case_ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case_ = Image.open(requests.get(SCREAMING_SNAKE_CASE__ , stream=SCREAMING_SNAKE_CASE__ ).raw )
return im
@torch.no_grad()
def __SCREAMING_SNAKE_CASE (SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__=True ):
snake_case_ = ViTConfig()
# patch_size
if model_name[-1] == "8":
snake_case_ = 8
# set labels if required
if not base_model:
snake_case_ = 1000
snake_case_ = '''huggingface/label-files'''
snake_case_ = '''imagenet-1k-id2label.json'''
snake_case_ = json.load(open(hf_hub_download(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , repo_type='''dataset''' ) , '''r''' ) )
snake_case_ = {int(SCREAMING_SNAKE_CASE__ ): v for k, v in idalabel.items()}
snake_case_ = idalabel
snake_case_ = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
snake_case_ = 384
snake_case_ = 1536
snake_case_ = 12
snake_case_ = 6
# load original model from torch hub
snake_case_ = torch.hub.load('''facebookresearch/dino:main''' , SCREAMING_SNAKE_CASE__ )
original_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case_ = original_model.state_dict()
if base_model:
remove_classification_head_(SCREAMING_SNAKE_CASE__ )
snake_case_ = create_rename_keys(SCREAMING_SNAKE_CASE__ , base_model=SCREAMING_SNAKE_CASE__ )
for src, dest in rename_keys:
rename_key(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
read_in_q_k_v(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
# load HuggingFace model
if base_model:
snake_case_ = ViTModel(SCREAMING_SNAKE_CASE__ , add_pooling_layer=SCREAMING_SNAKE_CASE__ ).eval()
else:
snake_case_ = ViTForImageClassification(SCREAMING_SNAKE_CASE__ ).eval()
model.load_state_dict(SCREAMING_SNAKE_CASE__ )
# Check outputs on an image, prepared by ViTImageProcessor
snake_case_ = ViTImageProcessor()
snake_case_ = image_processor(images=prepare_img() , return_tensors='''pt''' )
snake_case_ = encoding['''pixel_values''']
snake_case_ = model(SCREAMING_SNAKE_CASE__ )
if base_model:
snake_case_ = original_model(SCREAMING_SNAKE_CASE__ )
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.last_hidden_state[:, 0, :] , atol=1E-1 )
else:
snake_case_ = original_model(SCREAMING_SNAKE_CASE__ )
assert logits.shape == outputs.logits.shape
assert torch.allclose(SCREAMING_SNAKE_CASE__ , outputs.logits , atol=1E-3 )
Path(SCREAMING_SNAKE_CASE__ ).mkdir(exist_ok=SCREAMING_SNAKE_CASE__ )
print(F'''Saving model {model_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(SCREAMING_SNAKE_CASE__ )
if __name__ == "__main__":
lowerCAmelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''dino_vitb16''',
type=str,
help='''Name of the model trained with DINO you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--base_model''',
action='''store_true''',
help='''Whether to only convert the base model (no projection head weights).''',
)
parser.set_defaults(base_model=True)
lowerCAmelCase_ = parser.parse_args()
    convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
 | 39 |
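The trickiest step in the conversion above is read_in_q_k_v: timm stores one fused (3*hidden, hidden) qkv projection, while the HF ViT expects separate query/key/value matrices. A toy, shape-only illustration of that split (random values, hypothetical size):

import torch

hidden = 8
in_proj_weight = torch.randn(3 * hidden, hidden)   # fused qkv, as in timm
q_w = in_proj_weight[:hidden, :]
k_w = in_proj_weight[hidden : 2 * hidden, :]
v_w = in_proj_weight[-hidden:, :]
assert q_w.shape == k_w.shape == v_w.shape == (hidden, hidden)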
import json
import os
import unittest
from transformers import BatchEncoding, MvpTokenizer, MvpTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin, filter_roberta_detectors
@require_tokenizers
class lowercase_ ( A , unittest.TestCase ):
__lowerCamelCase = MvpTokenizer
__lowerCamelCase = MvpTokenizerFast
__lowerCamelCase = True
__lowerCamelCase = filter_roberta_detectors
def _snake_case ( self ) -> int:
super().setUp()
SCREAMING_SNAKE_CASE_ : List[str] =[
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''<unk>''',
]
SCREAMING_SNAKE_CASE_ : Optional[int] =dict(zip(__A , range(len(__A ) ) ) )
SCREAMING_SNAKE_CASE_ : int =['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
SCREAMING_SNAKE_CASE_ : Dict ={'''unk_token''': '''<unk>'''}
SCREAMING_SNAKE_CASE_ : Any =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
SCREAMING_SNAKE_CASE_ : Optional[int] =os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__A ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__A ) )
def _snake_case ( self , **__A ) -> int:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self , **__A ) -> Any:
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **__A )
def _snake_case ( self , __A ) -> Any:
return "lower newer", "lower newer"
@cached_property
def _snake_case ( self ) -> Union[str, Any]:
return MvpTokenizer.from_pretrained('''RUCAIBox/mvp''' )
@cached_property
def _snake_case ( self ) -> Optional[int]:
return MvpTokenizerFast.from_pretrained('''RUCAIBox/mvp''' )
@require_torch
def _snake_case ( self ) -> List[str]:
SCREAMING_SNAKE_CASE_ : str =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ : Tuple =[0, 250, 251, 17_818, 13, 39_186, 1_938, 4, 2]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer(__A , max_length=len(__A ) , padding=__A , return_tensors='''pt''' )
self.assertIsInstance(__A , __A )
self.assertEqual((2, 9) , batch.input_ids.shape )
self.assertEqual((2, 9) , batch.attention_mask.shape )
SCREAMING_SNAKE_CASE_ : Tuple =batch.input_ids.tolist()[0]
self.assertListEqual(__A , __A )
# Test that special tokens are reset
@require_torch
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] =['''A long paragraph for summarization.''', '''Another paragraph for summarization.''']
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Dict =tokenizer(__A , padding=__A , return_tensors='''pt''' )
# check if input_ids are returned and no labels
self.assertIn('''input_ids''' , __A )
self.assertIn('''attention_mask''' , __A )
self.assertNotIn('''labels''' , __A )
self.assertNotIn('''decoder_attention_mask''' , __A )
@require_torch
def _snake_case ( self ) -> Optional[Any]:
SCREAMING_SNAKE_CASE_ : Any =[
'''Summary of the text.''',
'''Another summary.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer(text_target=__A , max_length=32 , padding='''max_length''' , return_tensors='''pt''' )
self.assertEqual(32 , targets['''input_ids'''].shape[1] )
@require_torch
def _snake_case ( self ) -> str:
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : List[Any] =tokenizer(
['''I am a small frog''' * 1_024, '''I am a small frog'''] , padding=__A , truncation=__A , return_tensors='''pt''' )
self.assertIsInstance(__A , __A )
self.assertEqual(batch.input_ids.shape , (2, 1_024) )
@require_torch
def _snake_case ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ : str =['''A long paragraph for summarization.''']
SCREAMING_SNAKE_CASE_ : Optional[int] =[
'''Summary of the text.''',
]
for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
SCREAMING_SNAKE_CASE_ : Union[str, Any] =tokenizer(__A , text_target=__A , return_tensors='''pt''' )
SCREAMING_SNAKE_CASE_ : Tuple =inputs['''input_ids''']
SCREAMING_SNAKE_CASE_ : Union[str, Any] =inputs['''labels''']
self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item() )
self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item() )
self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item() )
def _snake_case ( self ) -> Any:
pass
def _snake_case ( self ) -> Optional[Any]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
SCREAMING_SNAKE_CASE_ : Dict =self.rust_tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE_ : Optional[Any] =self.tokenizer_class.from_pretrained(__A , **__A )
SCREAMING_SNAKE_CASE_ : List[Any] ='''A, <mask> AllenNLP sentence.'''
SCREAMING_SNAKE_CASE_ : Dict =tokenizer_r.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
SCREAMING_SNAKE_CASE_ : Any =tokenizer_p.encode_plus(__A , add_special_tokens=__A , return_token_type_ids=__A )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r['''token_type_ids'''] ) , sum(tokens_p['''token_type_ids'''] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r['''attention_mask'''] ) / len(tokens_r['''attention_mask'''] ) , sum(tokens_p['''attention_mask'''] ) / len(tokens_p['''attention_mask'''] ) , )
SCREAMING_SNAKE_CASE_ : Any =tokenizer_r.convert_ids_to_tokens(tokens_r['''input_ids'''] )
SCREAMING_SNAKE_CASE_ : List[str] =tokenizer_p.convert_ids_to_tokens(tokens_p['''input_ids'''] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(tokens_r['''input_ids'''] , [0, 250, 6, 50_264, 3_823, 487, 21_992, 3_645, 4, 2] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
self.assertSequenceEqual(
__A , ['''<s>''', '''A''', ''',''', '''<mask>''', '''ĠAllen''', '''N''', '''LP''', '''Ġsentence''', '''.''', '''</s>'''] )
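A minimal usage sketch of the seq2seq encoding these tests exercise, assuming the public MvpTokenizer API and the RUCAIBox/mvp checkpoint referenced above:

from transformers import MvpTokenizer

tokenizer = MvpTokenizer.from_pretrained("RUCAIBox/mvp")
batch = tokenizer(
    ["A long paragraph for summarization."],
    text_target=["Summary of the text."],
    return_tensors="pt",
)
print(batch["input_ids"].shape, batch["labels"].shape)  # source ids and target labels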
| 443 | 0 |
"""simple docstring"""
import importlib
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Union
import torch
from ..utils import BaseOutput
UpperCamelCase_ : List[Any] = '''scheduler_config.json'''
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = 1
snake_case = 2
snake_case = 3
snake_case = 4
snake_case = 5
snake_case = 6
snake_case = 7
snake_case = 8
snake_case = 9
snake_case = 10
snake_case = 11
snake_case = 12
snake_case = 13
snake_case = 14
@dataclass
class __lowerCAmelCase ( _lowercase ):
"""simple docstring"""
snake_case = 42
class __lowerCAmelCase :
"""simple docstring"""
snake_case = SCHEDULER_CONFIG_NAME
snake_case = []
snake_case = True
@classmethod
def lowerCamelCase__ ( cls : str , _snake_case : Dict[str, Any] = None , _snake_case : Optional[str] = None , _snake_case : Tuple=False , **_snake_case : Optional[Any] , ) -> Tuple:
"""simple docstring"""
A_ , A_ , A_ = cls.load_config(
pretrained_model_name_or_path=_snake_case , subfolder=_snake_case , return_unused_kwargs=_snake_case , return_commit_hash=_snake_case , **_snake_case , )
return cls.from_config(_snake_case , return_unused_kwargs=_snake_case , **_snake_case )
def lowerCamelCase__ ( self : Union[str, Any] , _snake_case : Union[str, os.PathLike] , _snake_case : bool = False , **_snake_case : str ) -> Any:
"""simple docstring"""
self.save_config(save_directory=_snake_case , push_to_hub=_snake_case , **_snake_case )
@property
def lowerCamelCase__ ( self : Any ) -> int:
"""simple docstring"""
return self._get_compatibles()
@classmethod
def lowerCamelCase__ ( cls : Any ) -> Dict:
"""simple docstring"""
A_ = list(set([cls.__name__] + cls._compatibles ) )
A_ = importlib.import_module(__name__.split("." )[0] )
A_ = [
getattr(_snake_case , _snake_case ) for c in compatible_classes_str if hasattr(_snake_case , _snake_case )
]
return compatible_classes
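A hedged usage sketch of this mixin as a scheduler subclass exposes it; the checkpoint id and subfolder layout are illustrative, assuming the usual diffusers conventions:

from diffusers import DDPMScheduler

scheduler = DDPMScheduler.from_pretrained(
    "runwayml/stable-diffusion-v1-5", subfolder="scheduler"
)
print(scheduler.compatibles)                  # classes sharing this config, via _get_compatibles
scheduler.save_pretrained("./scheduler_out")  # writes scheduler_config.json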
| 482 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Union[str, Any] , _snake_case : Optional[int] , _snake_case : Any=13 , _snake_case : Union[str, Any]=2 , _snake_case : Optional[int]=24 , _snake_case : Optional[Any]=16 , _snake_case : List[str]=True , _snake_case : str=True , _snake_case : List[Any]=32 , _snake_case : str=5 , _snake_case : int=4 , _snake_case : List[str]=37 , _snake_case : int="gelu" , _snake_case : str=0.1 , _snake_case : Optional[int]=0.1 , _snake_case : Optional[int]=10 , _snake_case : int=0.0_2 , _snake_case : int=None , _snake_case : Optional[Any]=2 , _snake_case : int=2 , ) -> Dict:
"""simple docstring"""
A_ = parent
A_ = batch_size
A_ = patch_size
A_ = max_length
A_ = num_mel_bins
A_ = is_training
A_ = use_labels
A_ = hidden_size
A_ = num_hidden_layers
A_ = num_attention_heads
A_ = intermediate_size
A_ = hidden_act
A_ = hidden_dropout_prob
A_ = attention_probs_dropout_prob
A_ = type_sequence_label_size
A_ = initializer_range
A_ = scope
A_ = frequency_stride
A_ = time_stride
# in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
A_ = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
A_ = (self.max_length - self.patch_size) // self.time_stride + 1
A_ = frequency_out_dimension * time_out_dimension
A_ = num_patches + 2
def lowerCamelCase__ ( self : Dict ) -> int:
"""simple docstring"""
A_ = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins] )
A_ = None
if self.use_labels:
A_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
A_ = self.get_config()
return config, input_values, labels
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
return ASTConfig(
patch_size=self.patch_size , max_length=self.max_length , num_mel_bins=self.num_mel_bins , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_snake_case , initializer_range=self.initializer_range , frequency_stride=self.frequency_stride , time_stride=self.time_stride , )
def lowerCamelCase__ ( self : int , _snake_case : Dict , _snake_case : Tuple , _snake_case : Optional[int] ) -> Union[str, Any]:
"""simple docstring"""
A_ = ASTModel(config=_snake_case )
model.to(_snake_case )
model.eval()
A_ = model(_snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : str ) -> List[str]:
"""simple docstring"""
A_ = self.prepare_config_and_inputs()
(
(
A_
) , (
A_
) , (
A_
) ,
) = config_and_inputs
A_ = {"input_values": input_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( _lowercase , _lowercase , unittest.TestCase ):
"""simple docstring"""
snake_case = (
(
ASTModel,
ASTForAudioClassification,
)
if is_torch_available()
else ()
)
snake_case = (
{"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
if is_torch_available()
else {}
)
snake_case = False
snake_case = False
snake_case = False
snake_case = False
def lowerCamelCase__ ( self : Any , _snake_case : Tuple , _snake_case : str , _snake_case : List[str] , _snake_case : int , _snake_case : int ) -> str:
"""simple docstring"""
if pipeline_test_casse_name == "AudioClassificationPipelineTests":
return True
return False
def lowerCamelCase__ ( self : str ) -> str:
"""simple docstring"""
A_ = ASTModelTester(self )
A_ = ConfigTester(self , config_class=_snake_case , has_text_modality=_snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="AST does not use inputs_embeds" )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
pass
def lowerCamelCase__ ( self : List[str] ) -> str:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
A_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
A_ , A_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ = model_class(_snake_case )
A_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ = [*signature.parameters.keys()]
A_ = ["input_values"]
self.assertListEqual(arg_names[:1] , _snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Optional[int]:
"""simple docstring"""
A_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_snake_case )
@slow
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ = ASTModel.from_pretrained(_snake_case )
self.assertIsNotNone(_snake_case )
def A_ ():
'''simple docstring'''
A_ = hf_hub_download(
repo_id="nielsr/audio-spectogram-transformer-checkpoint" , filename="sample_audio.flac" , repo_type="dataset" )
A_ , A_ = torchaudio.load(__a )
return audio, sampling_rate
@require_torch
@require_torchaudio
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : List[str] ) -> int:
"""simple docstring"""
return (
ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" )
if is_torchaudio_available()
else None
)
@slow
def lowerCamelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
A_ = self.default_feature_extractor
A_ = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593" ).to(_snake_case )
A_ = self.default_feature_extractor
A_ , A_ = prepare_audio()
A_ = audio.squeeze().numpy()
A_ = feature_extractor(_snake_case , sampling_rate=_snake_case , return_tensors="pt" ).to(_snake_case )
# forward pass
with torch.no_grad():
A_ = model(**_snake_case )
# verify the logits
A_ = torch.Size((1, 527) )
self.assertEqual(outputs.logits.shape , _snake_case )
A_ = torch.tensor([-0.8_7_6_0, -7.0_0_4_2, -8.6_6_0_2] ).to(_snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _snake_case , atol=1e-4 ) )
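A de-obfuscated sketch of that slow integration path, assuming the MIT/ast-finetuned-audioset-10-10-0.4593 checkpoint and a local audio file (path illustrative):

import torch
import torchaudio
from transformers import ASTFeatureExtractor, ASTForAudioClassification

audio, sampling_rate = torchaudio.load("sample_audio.flac")
extractor = ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
inputs = extractor(audio.squeeze().numpy(), sampling_rate=sampling_rate, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits            # shape (1, 527): AudioSet classes
print(model.config.id2label[int(logits.argmax(-1))])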
| 482 | 1 |
'''simple docstring'''
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def A ( UpperCamelCase_ : Dict ) -> Tuple:
'''simple docstring'''
lowerCAmelCase__ = fname.split(os.path.sep )[-1]
return re.search(r"^(.*)_\d+\.jpg$" , UpperCamelCase_ ).groups()[0]
class A ( SCREAMING_SNAKE_CASE__ ):
def __init__( self : List[Any] , __magic_name__ : Dict , __magic_name__ : Any=None , __magic_name__ : List[Any]=None ):
"""simple docstring"""
lowerCAmelCase__ = file_names
lowerCAmelCase__ = image_transform
lowerCAmelCase__ = label_to_id
def __len__( self : Any ):
"""simple docstring"""
return len(self.file_names )
def __getitem__( self : List[Any] , __magic_name__ : List[Any] ):
"""simple docstring"""
lowerCAmelCase__ = self.file_names[idx]
lowerCAmelCase__ = PIL.Image.open(__magic_name__ )
lowerCAmelCase__ = raw_image.convert("RGB" )
if self.image_transform is not None:
lowerCAmelCase__ = self.image_transform(__magic_name__ )
lowerCAmelCase__ = extract_label(__magic_name__ )
if self.label_to_id is not None:
lowerCAmelCase__ = self.label_to_id[label]
return {"image": image, "label": label}
def A ( UpperCamelCase_ : Tuple , UpperCamelCase_ : List[str] ) -> List[str]:
'''simple docstring'''
if args.with_tracking:
lowerCAmelCase__ = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
lowerCAmelCase__ = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ = config["lr"]
lowerCAmelCase__ = int(config["num_epochs"] )
lowerCAmelCase__ = int(config["seed"] )
lowerCAmelCase__ = int(config["batch_size"] )
lowerCAmelCase__ = config["image_size"]
if not isinstance(UpperCamelCase_ , (list, tuple) ):
lowerCAmelCase__ = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
lowerCAmelCase__ = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCAmelCase__ = int(args.checkpointing_steps )
else:
raise ValueError(
F"""Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.""" )
else:
lowerCAmelCase__ = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCAmelCase__ = os.path.split(UpperCamelCase_ )[-1].split("." )[0]
accelerator.init_trackers(UpperCamelCase_ , UpperCamelCase_ )
# Grab all the image filenames
lowerCAmelCase__ = [os.path.join(args.data_dir , UpperCamelCase_ ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
lowerCAmelCase__ = [extract_label(UpperCamelCase_ ) for fname in file_names]
lowerCAmelCase__ = list(set(UpperCamelCase_ ) )
id_to_label.sort()
lowerCAmelCase__ = {lbl: i for i, lbl in enumerate(UpperCamelCase_ )}
# Set the seed before splitting the data.
np.random.seed(UpperCamelCase_ )
torch.manual_seed(UpperCamelCase_ )
torch.cuda.manual_seed_all(UpperCamelCase_ )
# Split our filenames between train and validation
lowerCAmelCase__ = np.random.permutation(len(UpperCamelCase_ ) )
lowerCAmelCase__ = int(0.8 * len(UpperCamelCase_ ) )
lowerCAmelCase__ = random_perm[:cut]
lowerCAmelCase__ = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCAmelCase__ = Compose([RandomResizedCrop(UpperCamelCase_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCAmelCase__ = PetsDataset(
[file_names[i] for i in train_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# For evaluation, we use a deterministic Resize
lowerCAmelCase__ = Compose([Resize(UpperCamelCase_ ), ToTensor()] )
lowerCAmelCase__ = PetsDataset([file_names[i] for i in eval_split] , image_transform=UpperCamelCase_ , label_to_id=UpperCamelCase_ )
# Instantiate dataloaders.
lowerCAmelCase__ = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
lowerCAmelCase__ = DataLoader(UpperCamelCase_ , shuffle=UpperCamelCase_ , batch_size=UpperCamelCase_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ = create_model("resnet50d" , pretrained=UpperCamelCase_ , num_classes=len(UpperCamelCase_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCAmelCase__ = False
for param in model.get_classifier().parameters():
lowerCAmelCase__ = True
# We normalize the batches of images to be a bit faster.
lowerCAmelCase__ = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
lowerCAmelCase__ = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCAmelCase__ = OneCycleLR(optimizer=UpperCamelCase_ , max_lr=UpperCamelCase_ , epochs=UpperCamelCase_ , steps_per_epoch=len(UpperCamelCase_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)
# We need to keep track of how many total steps we have iterated over
    overall_step = 0
# We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
# Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None and args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]
        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)
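            # Worked example (illustrative numbers): resuming from "step_250" with a
            # 100-batch dataloader gives starting_epoch = 250 // 100 = 2 and
            # resume_step = 250 - 2 * 100 = 50, i.e. the first 50 batches of
            # epoch 2 are skipped below.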
# Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
            accelerator.backward(loss)
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()
        eval_metric = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10 and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location where experiment tracking logs and relevant project information are stored.",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
| 48 |
import coval # From: git+https://github.com/ns-moosavi/coval.git # noqa: F401
from coval.conll import reader, util
from coval.eval import evaluator
import datasets
__a = datasets.logging.get_logger(__name__)
__a = """\
@InProceedings{moosavi2019minimum,
author = { Nafise Sadat Moosavi, Leo Born, Massimo Poesio and Michael Strube},
title = {Using Automatically Extracted Minimum Spans to Disentangle Coreference Evaluation from Boundary Detection},
year = {2019},
booktitle = {Proceedings of the 57th Annual Meeting of
the Association for Computational Linguistics (Volume 1: Long Papers)},
publisher = {Association for Computational Linguistics},
address = {Florence, Italy},
}
@inproceedings{10.3115/1072399.1072405,
author = {Vilain, Marc and Burger, John and Aberdeen, John and Connolly, Dennis and Hirschman, Lynette},
title = {A Model-Theoretic Coreference Scoring Scheme},
year = {1995},
isbn = {1558604022},
publisher = {Association for Computational Linguistics},
address = {USA},
url = {https://doi.org/10.3115/1072399.1072405},
doi = {10.3115/1072399.1072405},
booktitle = {Proceedings of the 6th Conference on Message Understanding},
pages = {45–52},
numpages = {8},
location = {Columbia, Maryland},
series = {MUC6 ’95}
}
@INPROCEEDINGS{Bagga98algorithmsfor,
author = {Amit Bagga and Breck Baldwin},
title = {Algorithms for Scoring Coreference Chains},
booktitle = {In The First International Conference on Language Resources and Evaluation Workshop on Linguistics Coreference},
year = {1998},
pages = {563--566}
}
@INPROCEEDINGS{Luo05oncoreference,
author = {Xiaoqiang Luo},
title = {On coreference resolution performance metrics},
booktitle = {In Proc. of HLT/EMNLP},
year = {2005},
pages = {25--32},
publisher = {URL}
}
@inproceedings{moosavi-strube-2016-coreference,
title = \"Which Coreference Evaluation Metric Do You Trust? A Proposal for a Link-based Entity Aware Metric\",
author = \"Moosavi, Nafise Sadat and
Strube, Michael\",
booktitle = \"Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers)\",
month = aug,
year = \"2016\",
address = \"Berlin, Germany\",
publisher = \"Association for Computational Linguistics\",
url = \"https://www.aclweb.org/anthology/P16-1060\",
doi = \"10.18653/v1/P16-1060\",
pages = \"632--642\",
}
"""
__a = """\
CoVal is a coreference evaluation tool for the CoNLL and ARRAU datasets which
implements of the common evaluation metrics including MUC [Vilain et al, 1995],
B-cubed [Bagga and Baldwin, 1998], CEAFe [Luo et al., 2005],
LEA [Moosavi and Strube, 2016] and the averaged CoNLL score
(the average of the F1 values of MUC, B-cubed and CEAFe)
[Denis and Baldridge, 2009a; Pradhan et al., 2011].
This wrapper of CoVal currently only work with CoNLL line format:
The CoNLL format has one word per line with all the annotation for this word in column separated by spaces:
Column Type Description
1 Document ID This is a variation on the document filename
2 Part number Some files are divided into multiple parts numbered as 000, 001, 002, ... etc.
3 Word number
4 Word itself This is the token as segmented/tokenized in the Treebank. Initially the *_skel file contain the placeholder [WORD] which gets replaced by the actual token from the Treebank which is part of the OntoNotes release.
5 Part-of-Speech
6 Parse bit This is the bracketed structure broken before the first open parenthesis in the parse, and the word/part-of-speech leaf replaced with a *. The full parse can be created by substituting the asterix with the \"([pos] [word])\" string (or leaf) and concatenating the items in the rows of that column.
7 Predicate lemma The predicate lemma is mentioned for the rows for which we have semantic role information. All other rows are marked with a \"-\"
8 Predicate Frameset ID This is the PropBank frameset ID of the predicate in Column 7.
9 Word sense This is the word sense of the word in Column 3.
10 Speaker/Author This is the speaker or author name where available. Mostly in Broadcast Conversation and Web Log data.
11 Named Entities These columns identifies the spans representing various named entities.
12:N Predicate Arguments There is one column each of predicate argument structure information for the predicate mentioned in Column 7.
N Coreference Coreference chain information encoded in a parenthesis structure.
More informations on the format can be found here (section \"*_conll File Format\"): http://www.conll.cemantix.org/2012/data.html
Details on the evaluation on CoNLL can be found here: https://github.com/ns-moosavi/coval/blob/master/conll/README.md
CoVal code was written by @ns-moosavi.
Some parts are borrowed from https://github.com/clarkkev/deep-coref/blob/master/evaluation.py
The test suite is taken from https://github.com/conll/reference-coreference-scorers/
Mention evaluation and the test suite are added by @andreasvc.
Parsing CoNLL files is developed by Leo Born.
"""
__a = """
Calculates coreference evaluation metrics.
Args:
predictions: list of sentences. Each sentence is a list of word predictions to score in the CoNLL format.
Each prediction is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
references: list of sentences. Each sentence is a list of word reference to score in the CoNLL format.
Each reference is a word with its annotations as a string made of columns joined with spaces.
Only columns 4, 5, 6 and the last column are used (word, POS, Pars and coreference annotation)
See the details on the format in the description of the metric.
keep_singletons: After extracting all mentions of key or system files,
mentions whose corresponding coreference chain is of size one,
are considered as singletons. The default evaluation mode will include
singletons in evaluations if they are included in the key or the system files.
By setting 'keep_singletons=False', all singletons in the key and system files
will be excluded from the evaluation.
NP_only: Most of the recent coreference resolvers only resolve NP mentions and
leave out the resolution of VPs. By setting the 'NP_only' option, the scorer will only evaluate the resolution of NPs.
min_span: By setting 'min_span', the scorer reports the results based on automatically detected minimum spans.
Minimum spans are determined using the MINA algorithm.
Returns:
'mentions': mentions
'muc': MUC metric [Vilain et al, 1995]
'bcub': B-cubed [Bagga and Baldwin, 1998]
'ceafe': CEAFe [Luo et al., 2005]
'lea': LEA [Moosavi and Strube, 2016]
'conll_score': averaged CoNLL score (the average of the F1 values of MUC, B-cubed and CEAFe)
Examples:
>>> coval = datasets.load_metric('coval')
>>> words = ['bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -',
... 'bc/cctv/00/cctv_0005 0 1 you PRP (NP*) - - - Xu_li * (ARG1*) (ARG0*) (116)',
... 'bc/cctv/00/cctv_0005 0 2 everyone NN (NP*) - - - Xu_li * (ARGM-DIS*) * (116)',
... 'bc/cctv/00/cctv_0005 0 3 for IN (PP* - - - Xu_li * (ARG2* * -',
... 'bc/cctv/00/cctv_0005 0 4 watching VBG (S(VP*)))) watch 01 1 Xu_li * *) (V*) -',
... 'bc/cctv/00/cctv_0005 0 5 . . *)) - - - Xu_li * * * -']
>>> references = [words]
>>> predictions = [words]
>>> results = coval.compute(predictions=predictions, references=references)
>>> print(results) # doctest:+ELLIPSIS
{'mentions/recall': 1.0,[...] 'conll_score': 100.0}
"""
def get_coref_infos(
    key_lines, sys_lines, NP_only=False, remove_nested=False, keep_singletons=True, min_span=False, doc="dummy_doc"
):
    key_doc_lines = {doc: key_lines}
    sys_doc_lines = {doc: sys_lines}
    doc_coref_infos = {}
    key_nested_coref_num = 0
    sys_nested_coref_num = 0
    key_removed_nested_clusters = 0
    sys_removed_nested_clusters = 0
    key_singletons_num = 0
    sys_singletons_num = 0
    key_clusters, singletons_num = reader.get_doc_mentions(doc, key_doc_lines[doc], keep_singletons)
    key_singletons_num += singletons_num
    if NP_only or min_span:
        key_clusters = reader.set_annotated_parse_trees(key_clusters, key_doc_lines[doc], NP_only, min_span)
    sys_clusters, singletons_num = reader.get_doc_mentions(doc, sys_doc_lines[doc], keep_singletons)
    sys_singletons_num += singletons_num
    if NP_only or min_span:
        sys_clusters = reader.set_annotated_parse_trees(sys_clusters, sys_doc_lines[doc], NP_only, min_span)
    if remove_nested:
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(key_clusters, keep_singletons)
        key_nested_coref_num += nested_mentions
        key_removed_nested_clusters += removed_clusters
        nested_mentions, removed_clusters = reader.remove_nested_coref_mentions(sys_clusters, keep_singletons)
        sys_nested_coref_num += nested_mentions
        sys_removed_nested_clusters += removed_clusters
    sys_mention_key_cluster = reader.get_mention_assignments(sys_clusters, key_clusters)
    key_mention_sys_cluster = reader.get_mention_assignments(key_clusters, sys_clusters)
    doc_coref_infos[doc] = (key_clusters, sys_clusters, key_mention_sys_cluster, sys_mention_key_cluster)
if remove_nested:
logger.info(
"""Number of removed nested coreferring mentions in the key """
F"""annotation: {key_nested_coref_num}; and system annotation: {sys_nested_coref_num}""" )
logger.info(
"""Number of resulting singleton clusters in the key """
F"""annotation: {key_removed_nested_clusters}; and system annotation: {sys_removed_nested_clusters}""" )
if not keep_singletons:
logger.info(
F"""{key_singletons_num:d} and {sys_singletons_num:d} singletons are removed from the key and system """
"""files, respectively""" )
return doc_coref_infos
def evaluate(key_lines, sys_lines, metrics, NP_only, remove_nested, keep_singletons, min_span):
    doc_coref_infos = get_coref_infos(key_lines, sys_lines, NP_only, remove_nested, keep_singletons, min_span)
    output_scores = {}
    conll = 0
    conll_subparts_num = 0
    for name, metric in metrics:
        recall, precision, f1 = evaluator.evaluate_documents(doc_coref_infos, metric, beta=1)
        if name in ["muc", "bcub", "ceafe"]:
            conll += f1
            conll_subparts_num += 1
        output_scores.update({f"{name}/recall": recall, f"{name}/precision": precision, f"{name}/f1": f1})
        logger.info(
            name.ljust(10), f"Recall: {recall * 100:.2f}", f" Precision: {precision * 100:.2f}", f" F1: {f1 * 100:.2f}", )
    if conll_subparts_num == 3:
        conll = (conll / 3) * 100
        logger.info(f"CoNLL score: {conll:.2f}")
        output_scores.update({"conll_score": conll})
    return output_scores
def check_gold_parse_annotation(key_lines) -> bool:
    has_gold_parse = False
    for line in key_lines:
        if not line.startswith("#"):
            if len(line.split()) > 6:
                parse_col = line.split()[5]
                if not parse_col == "-":
                    has_gold_parse = True
                    break
            else:
                break
    return has_gold_parse
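# Example, reusing the CoNLL line from the docstring above: in
# "bc/cctv/00/cctv_0005 0 0 Thank VBP (TOP(S(VP* thank 01 1 Xu_li * (V*) * -"
# the sixth whitespace-separated field (index 5) is "(TOP(S(VP*", a real parse bit
# rather than "-", so a gold parse annotation is considered available.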
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Coval(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Sequence(datasets.Value("""string""" ) ),
} ) , codebase_urls=["""https://github.com/ns-moosavi/coval"""] , reference_urls=[
"""https://github.com/ns-moosavi/coval""",
"""https://www.aclweb.org/anthology/P16-1060""",
"""http://www.conll.cemantix.org/2012/data.html""",
] , )
    def _compute(
        self, predictions, references, keep_singletons=True, NP_only=False, min_span=False, remove_nested=False
    ):
        metrics = [
            ("mentions", evaluator.mentions),
            ("muc", evaluator.muc),
            ("bcub", evaluator.b_cubed),
            ("ceafe", evaluator.ceafe),
            ("lea", evaluator.lea),
        ]
        if min_span:
            has_gold_parse = util.check_gold_parse_annotation(references)
            if not has_gold_parse:
                raise NotImplementedError("References should have gold parse annotation to use 'min_span'.")
            # util.parse_key_file(key_file)
            # key_file = key_file + ".parsed"
        score = evaluate(
            key_lines=references,
            sys_lines=predictions,
            metrics=metrics,
            NP_only=NP_only,
            remove_nested=remove_nested,
            keep_singletons=keep_singletons,
            min_span=min_span,
        )
        return score | 377 | 0 |
| 377 | 0 |
import json
import os
from dataclasses import dataclass
from functools import partial
from typing import Callable
import flax.linen as nn
import jax
import jax.numpy as jnp
import joblib
import optax
import wandb
from flax import jax_utils, struct, traverse_util
from flax.serialization import from_bytes, to_bytes
from flax.training import train_state
from flax.training.common_utils import shard
from tqdm.auto import tqdm
from transformers import BigBirdConfig, FlaxBigBirdForQuestionAnswering
from transformers.models.big_bird.modeling_flax_big_bird import FlaxBigBirdForQuestionAnsweringModule
class FlaxBigBirdForNaturalQuestionsModule(FlaxBigBirdForQuestionAnsweringModule):
    """BigBird question-answering module with a 5-way classification head on top."""

    config: BigBirdConfig
    dtype: jnp.dtype = jnp.float32
    add_pooling_layer: bool = True

    def setup(self):
        super().setup()
        self.cls = nn.Dense(5, dtype=self.dtype)

    def __call__(self, *args, **kwargs):
        outputs = super().__call__(*args, **kwargs)
        cls_out = self.cls(outputs[2])
        return outputs[:2] + (cls_out,)


class FlaxBigBirdForNaturalQuestions(FlaxBigBirdForQuestionAnswering):
    module_class = FlaxBigBirdForNaturalQuestionsModule
def calculate_loss_for_nq(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooler_labels):
    def cross_entropy(logits, labels, reduction=None):
        # one-hot cross entropy; `reduction` is an optional aggregation fn such as jnp.mean
        vocab_size = logits.shape[-1]
        labels = (labels[..., None] == jnp.arange(vocab_size)[None]).astype("f4")
        logits = jax.nn.log_softmax(logits, axis=-1)
        loss = -jnp.sum(labels * logits, axis=-1)
        if reduction is not None:
            loss = reduction(loss)
        return loss

    cross_entropy = partial(cross_entropy, reduction=jnp.mean)
    start_loss = cross_entropy(start_logits, start_labels)
    end_loss = cross_entropy(end_logits, end_labels)
    pooled_loss = cross_entropy(pooled_logits, pooler_labels)
    return (start_loss + end_loss + pooled_loss) / 3
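# Quick numeric check of the cross-entropy above (illustrative values): for a single
# example with logits [0.0, 0.0] and label 0, log_softmax yields [-0.693, -0.693],
# the one-hot mask keeps -(-0.693) = 0.693, and jnp.mean leaves that unchanged.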
@dataclass
class Args:
    model_id: str = "google/bigbird-roberta-base"
    logging_steps: int = 3000
    save_steps: int = 10500

    block_size: int = 128
    num_random_blocks: int = 3

    batch_size_per_device: int = 1
    max_epochs: int = 5

    # tx_args
    lr: float = 3e-5
    init_lr: float = 0.0
    warmup_steps: int = 20000
    weight_decay: float = 0.0095

    save_dir: str = "bigbird-roberta-natural-questions"
    base_dir: str = "training-expt"
    tr_data_path: str = "data/nq-training.jsonl"
    val_data_path: str = "data/nq-validation.jsonl"

    def __post_init__(self):
        os.makedirs(self.base_dir, exist_ok=True)
        self.save_dir = os.path.join(self.base_dir, self.save_dir)
        self.batch_size = self.batch_size_per_device * jax.device_count()
@dataclass
class DataCollator:
    pad_id: int
    max_length: int = 4096  # no dynamic padding on TPUs

    def __call__(self, batch):
        batch = self.collate_fn(batch)
        batch = jax.tree_util.tree_map(shard, batch)
        return batch

    def collate_fn(self, features):
        input_ids, attention_mask = self.fetch_inputs(features["input_ids"])
        batch = {
            "input_ids": jnp.array(input_ids, dtype=jnp.int32),
            "attention_mask": jnp.array(attention_mask, dtype=jnp.int32),
            "start_labels": jnp.array(features["start_token"], dtype=jnp.int32),
            "end_labels": jnp.array(features["end_token"], dtype=jnp.int32),
            "pooled_labels": jnp.array(features["category"], dtype=jnp.int32),
        }
        return batch

    def fetch_inputs(self, input_ids: list):
        inputs = [self._fetch_inputs(ids) for ids in input_ids]
        return zip(*inputs)

    def _fetch_inputs(self, input_ids: list):
        attention_mask = [1 for _ in range(len(input_ids))]
        while len(input_ids) < self.max_length:
            input_ids.append(self.pad_id)
            attention_mask.append(0)
        return input_ids, attention_mask
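    # Worked example with illustrative values: with pad_id=0, max_length=6 and
    # input_ids=[5, 6, 7], _fetch_inputs returns ([5, 6, 7, 0, 0, 0], [1, 1, 1, 0, 0, 0]),
    # i.e. right padding plus the matching attention mask.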
def get_batched_dataset(dataset, batch_size, seed=None):
    if seed is not None:
        dataset = dataset.shuffle(seed=seed)
    for i in range(len(dataset) // batch_size):
        batch = dataset[i * batch_size : (i + 1) * batch_size]
        yield dict(batch)
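# Note that the generator drops any trailing remainder: with 10 examples and
# batch_size=4 (illustrative numbers) it yields 10 // 4 = 2 batches, skipping the
# last 2 examples for that pass over the data.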
@partial(jax.pmap, axis_name="batch")
def train_step(state, drp_rng, **model_inputs):
    def loss_fn(params):
        start_labels = model_inputs.pop("start_labels")
        end_labels = model_inputs.pop("end_labels")
        pooled_labels = model_inputs.pop("pooled_labels")

        outputs = state.apply_fn(**model_inputs, params=params, dropout_rng=drp_rng, train=True)
        start_logits, end_logits, pooled_logits = outputs

        return state.loss_fn(
            start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels, )

    drp_rng, new_drp_rng = jax.random.split(drp_rng)
    grad_fn = jax.value_and_grad(loss_fn)
    loss, grads = grad_fn(state.params)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    grads = jax.lax.pmean(grads, "batch")

    state = state.apply_gradients(grads=grads)
    return state, metrics, new_drp_rng
@partial(jax.pmap, axis_name="batch")
def val_step(state, **model_inputs):
    start_labels = model_inputs.pop("start_labels")
    end_labels = model_inputs.pop("end_labels")
    pooled_labels = model_inputs.pop("pooled_labels")

    outputs = state.apply_fn(**model_inputs, params=state.params, train=False)
    start_logits, end_logits, pooled_logits = outputs

    loss = state.loss_fn(start_logits, start_labels, end_logits, end_labels, pooled_logits, pooled_labels)
    metrics = jax.lax.pmean({"loss": loss}, axis_name="batch")
    return metrics
class TrainState(train_state.TrainState):
    loss_fn: Callable = struct.field(pytree_node=False)
@dataclass
class Trainer:
    args: Args
    data_collator: Callable
    train_step_fn: Callable
    val_step_fn: Callable
    model_save_fn: Callable
    logger: wandb
    scheduler_fn: Callable = None
    def create_state(self, model, tx, num_train_steps, ckpt_dir=None):
        params = model.params
        state = TrainState.create(
            apply_fn=model.__call__, params=params, tx=tx, loss_fn=calculate_loss_for_nq, )
        if ckpt_dir is not None:
            params, opt_state, step, args, data_collator = restore_checkpoint(ckpt_dir, state)
            tx_args = {
                "lr": args.lr,
                "init_lr": args.init_lr,
                "warmup_steps": args.warmup_steps,
                "num_train_steps": num_train_steps,
                "weight_decay": args.weight_decay,
            }
            tx, lr = build_tx(**tx_args)
            state = train_state.TrainState(
                step=step, apply_fn=model.__call__, params=params, tx=tx, opt_state=opt_state, )
            self.args = args
            self.data_collator = data_collator
            self.scheduler_fn = lr
            model.params = params
        state = jax_utils.replicate(state)
        return state
    def train(self, state, tr_dataset, val_dataset):
        args = self.args
        total = len(tr_dataset) // args.batch_size

        rng = jax.random.PRNGKey(0)
        drp_rng = jax.random.split(rng, jax.device_count())
        for epoch in range(args.max_epochs):
            running_loss = jnp.array(0, dtype=jnp.float32)
            tr_dataloader = get_batched_dataset(tr_dataset, args.batch_size, seed=epoch)
            i = 0
            for batch in tqdm(tr_dataloader, total=total, desc=f"Running EPOCH-{epoch}"):
                batch = self.data_collator(batch)
                state, metrics, drp_rng = self.train_step_fn(state, drp_rng, **batch)
                running_loss += jax_utils.unreplicate(metrics["loss"])
                i += 1
                if i % args.logging_steps == 0:
                    state_step = jax_utils.unreplicate(state.step)
                    tr_loss = running_loss.item() / i
                    lr = self.scheduler_fn(state_step - 1)

                    eval_loss = self.evaluate(state, val_dataset)
                    logging_dict = {
                        "step": state_step.item(),
                        "eval_loss": eval_loss.item(),
                        "tr_loss": tr_loss,
                        "lr": lr.item(),
                    }
                    tqdm.write(str(logging_dict))
                    self.logger.log(logging_dict, commit=True)

                if i % args.save_steps == 0:
                    self.save_checkpoint(args.save_dir + f"-e{epoch}-s{i}", state=state)
    def evaluate(self, state, dataset):
        dataloader = get_batched_dataset(dataset, self.args.batch_size)
        total = len(dataset) // self.args.batch_size
        running_loss = jnp.array(0, dtype=jnp.float32)
        i = 0
        for batch in tqdm(dataloader, total=total, desc="Evaluating ... "):
            batch = self.data_collator(batch)
            metrics = self.val_step_fn(state, **batch)
            running_loss += jax_utils.unreplicate(metrics["loss"])
            i += 1
        return running_loss / i
    def save_checkpoint(self, save_dir, state):
        state = jax_utils.unreplicate(state)
        print(f"SAVING CHECKPOINT IN {save_dir}", end=" ... ")
        self.model_save_fn(save_dir, params=state.params)
        with open(os.path.join(save_dir, "opt_state.msgpack"), "wb") as f:
            f.write(to_bytes(state.opt_state))
        joblib.dump(self.args, os.path.join(save_dir, "args.joblib"))
        joblib.dump(self.data_collator, os.path.join(save_dir, "data_collator.joblib"))
        with open(os.path.join(save_dir, "training_state.json"), "w") as f:
            json.dump({"step": state.step.item()}, f)
        print("DONE")
def restore_checkpoint(save_dir, state):
    print(f"RESTORING CHECKPOINT FROM {save_dir}", end=" ... ")
    with open(os.path.join(save_dir, "flax_model.msgpack"), "rb") as f:
        params = from_bytes(state.params, f.read())
    with open(os.path.join(save_dir, "opt_state.msgpack"), "rb") as f:
        opt_state = from_bytes(state.opt_state, f.read())
    args = joblib.load(os.path.join(save_dir, "args.joblib"))
    data_collator = joblib.load(os.path.join(save_dir, "data_collator.joblib"))
    with open(os.path.join(save_dir, "training_state.json"), "r") as f:
        training_state = json.load(f)
    step = training_state["step"]
    print("DONE")
    return params, opt_state, step, args, data_collator
def scheduler_fn(lr, init_lr, warmup_steps, num_train_steps):
    decay_steps = num_train_steps - warmup_steps
    warmup_fn = optax.linear_schedule(init_value=init_lr, end_value=lr, transition_steps=warmup_steps)
    decay_fn = optax.linear_schedule(init_value=lr, end_value=1e-7, transition_steps=decay_steps)
    lr = optax.join_schedules(schedules=[warmup_fn, decay_fn], boundaries=[warmup_steps])
    return lr
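# With the Args defaults above, this schedule warms up linearly from init_lr=0.0 to
# lr=3e-5 over the first 20000 steps, then decays linearly towards 1e-7 over the
# remaining num_train_steps - 20000 steps.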
def build_tx(lr, init_lr, warmup_steps, num_train_steps, weight_decay):
    def weight_decay_mask(params):
        params = traverse_util.flatten_dict(params)
        # keys of the flattened dict are tuples of path components; decay everything
        # except biases and LayerNorm scales
        mask = {k: (k[-1] != "bias" and k[-2:] != ("LayerNorm", "scale")) for k in params.keys()}
        return traverse_util.unflatten_dict(mask)

    lr = scheduler_fn(lr, init_lr, warmup_steps, num_train_steps)
    tx = optax.adamw(learning_rate=lr, weight_decay=weight_decay, mask=weight_decay_mask)
    return tx, lr
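# Minimal usage sketch (num_train_steps is an assumed value for illustration; the
# other numbers are the Args defaults above):
# tx, lr_schedule = build_tx(lr=3e-5, init_lr=0.0, warmup_steps=20000, num_train_steps=100000, weight_decay=0.0095)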
| 718 |
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class BaseTransformersCLICommand(ABC):
    @staticmethod
    @abstractmethod
    def register_subcommand(parser: ArgumentParser):
        raise NotImplementedError()

    @abstractmethod
    def run(self):
        raise NotImplementedError()
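# Illustrative (hypothetical) subclass sketch showing how the two abstract hooks
# fit together; the command name and behavior below are made up for demonstration:
#
#     class HelloCommand(BaseTransformersCLICommand):
#         @staticmethod
#         def register_subcommand(parser: ArgumentParser):
#             hello_parser = parser.add_parser("hello")
#             hello_parser.set_defaults(func=lambda args: HelloCommand())
#
#         def run(self):
#             print("hello")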
| 375 | 0 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCAmelCase_ ( PipelineLatentTesterMixin , PipelineKarrasSchedulerTesterMixin , PipelineTesterMixin , unittest.TestCase ):
_UpperCamelCase : Dict = StableDiffusionInstructPixaPixPipeline
_UpperCamelCase : Optional[int] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
_UpperCamelCase : Dict = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
_UpperCamelCase : List[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
_UpperCamelCase : Optional[Any] = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=8, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip')
        components = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert('RGB')
        if str(device).startswith('mps'):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : str = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : List[Any] = self.get_dummy_components()
_lowercase : Optional[int] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : List[Any] = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Any = sd_pipe(**_lowerCAmelCase ).images
_lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : List[str] = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : int = self.get_dummy_components()
_lowercase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[Any] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Tuple = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Optional[Any] = 'french fries'
_lowercase : Dict = sd_pipe(**_lowerCAmelCase , negative_prompt=_lowerCAmelCase )
_lowercase : Optional[Any] = output.images
_lowercase : Union[str, Any] = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : Any = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Union[str, Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components()
_lowercase : int = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : Any = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Any = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : Union[str, Any] = [inputs['prompt']] * 2
_lowercase : Union[str, Any] = np.array(inputs['image'] ).astype(np.floataa ) / 2_55.0
_lowercase : Tuple = torch.from_numpy(_lowerCAmelCase ).unsqueeze(0 ).to(_lowerCAmelCase )
_lowercase : Optional[int] = image / 2 + 0.5
_lowercase : List[Any] = image.permute(0 , 3 , 1 , 2 )
_lowercase : Optional[int] = image.repeat(2 , 1 , 1 , 1 )
_lowercase : Any = sd_pipe(**_lowerCAmelCase ).images
_lowercase : List[str] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
_lowercase : Optional[int] = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Optional[Any] = 'cpu' # ensure determinism for the device-dependent torch.Generator
_lowercase : Dict = self.get_dummy_components()
_lowercase : Any = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule='scaled_linear' )
_lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = sd_pipe.to(_lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : Dict = self.get_dummy_inputs(_lowerCAmelCase )
_lowercase : List[str] = sd_pipe(**_lowerCAmelCase ).images
_lowercase : List[Any] = image[0, -3:, -3:, -1]
        rounded_slice = [round(x, 4) for x in image_slice.flatten().tolist()]
        print(','.join(str(x) for x in rounded_slice))
assert image.shape == (1, 3_2, 3_2, 3)
_lowercase : str = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self ):
_lowercase : Union[str, Any] = self.get_dummy_components()
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline(**_lowerCAmelCase )
_lowercase : List[str] = VaeImageProcessor(do_resize=_lowerCAmelCase , do_normalize=_lowerCAmelCase )
_lowercase : Dict = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
_lowercase : str = pipe(**self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type='pt' ) )[0]
_lowercase : List[str] = components['vae']
_lowercase : Optional[Any] = self.get_dummy_inputs_by_type(_lowerCAmelCase , input_image_type='pt' )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
_lowercase : Optional[int] = vae.encode(inputs[image_param] ).latent_dist.mode()
_lowercase : Optional[Any] = pipe(**_lowerCAmelCase )[0]
_lowercase : List[str] = np.abs(out - out_latents_inputs ).max()
self.assertLess(_lowerCAmelCase , 1E-4 , 'passing latents as image input generate different result from passing image' )
@slow
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, seed=0):
        generator = torch.manual_seed(seed)
        image = load_image(
            'https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg' )
        inputs = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self ):
_lowercase : Optional[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Tuple = self.get_inputs()
_lowercase : Dict = pipe(**_lowerCAmelCase ).images
_lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : Optional[Any] = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : str = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
_lowercase : Dict = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Optional[int] = self.get_inputs()
_lowercase : Optional[int] = pipe(**_lowerCAmelCase ).images
_lowercase : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : List[Any] = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Tuple = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase )
_lowercase : Optional[int] = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Tuple = self.get_inputs()
_lowercase : int = pipe(**_lowerCAmelCase ).images
_lowercase : Tuple = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
_lowercase : str = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self ):
_lowercase : Dict = 0
def callback_fn(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ) -> None:
_lowercase : Any = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
_lowercase : Optional[int] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowercase : Dict = latents[0, -3:, -3:, -1]
_lowercase : Any = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
_lowercase : List[str] = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
_lowercase : List[Any] = latents[0, -3:, -3:, -1]
_lowercase : str = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
_lowercase : Tuple = False
_lowercase : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase : str = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Any = self.get_inputs()
pipe(**_lowerCAmelCase , callback=_lowerCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
'timbrooks/instruct-pix2pix' , safety_checker=_lowerCAmelCase , torch_dtype=torch.floataa )
_lowercase : Dict = pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_lowercase : List[Any] = self.get_inputs()
_lowercase : List[Any] = pipe(**_lowerCAmelCase )
_lowercase : Optional[int] = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def __a ( self ):
_lowercase : Union[str, Any] = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
_lowercase : Union[str, Any] = inputs['image'].resize((5_0_4, 5_0_4) )
_lowercase : List[str] = 'timbrooks/instruct-pix2pix'
_lowercase : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_lowerCAmelCase , safety_checker=_lowerCAmelCase , )
pipe.to(_lowerCAmelCase )
pipe.set_progress_bar_config(disable=_lowerCAmelCase )
pipe.enable_attention_slicing()
_lowercase : Any = pipe(**_lowerCAmelCase )
_lowercase : List[str] = output.images[0]
_lowercase : List[Any] = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
_lowercase : Tuple = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
| 66 |
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
"A": ["B", "C", "D"],
"B": ["A", "D", "E"],
"C": ["A", "F"],
"D": ["B", "D"],
"E": ["B", "F"],
"F": ["C", "E", "G"],
"G": ["F"],
}
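# Since every vertex above is reachable from "A", the call below prints the full
# vertex set {'A', 'B', 'C', 'D', 'E', 'F', 'G'} (in arbitrary set ordering).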
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, "A"))
| 632 | 0 |
'''simple docstring'''
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LDMTextToImagePipeline, UNetaDConditionModel
from diffusers.utils.testing_utils import (
enable_full_determinism,
load_numpy,
nightly,
require_torch_gpu,
slow,
torch_device,
)
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _UpperCamelCase ( PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
_snake_case = LDMTextToImagePipeline
_snake_case = TEXT_TO_IMAGE_PARAMS - {
'''negative_prompt''',
'''negative_prompt_embeds''',
'''cross_attention_kwargs''',
'''prompt_embeds''',
}
_snake_case = PipelineTesterMixin.required_optional_params - {
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_snake_case = TEXT_TO_IMAGE_BATCH_PARAMS
_snake_case = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
        scheduler = DDIMScheduler(
            beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=(32, 64) , in_channels=3 , out_channels=3 , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , latent_channels=4 , )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
"unet": unet,
"scheduler": scheduler,
"vqvae": vae,
"bert": text_encoder,
"tokenizer": tokenizer,
}
return components
    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a__ ( self ) -> List[str]:
lowercase : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
lowercase : Tuple = self.get_dummy_components()
lowercase : List[str] = LDMTextToImagePipeline(**a_ )
pipe.to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowercase : Dict = self.get_dummy_inputs(a_ )
lowercase : Dict = pipe(**a_ ).images
lowercase : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 1_6, 1_6, 3)
lowercase : Optional[Any] = np.array([0.61_01, 0.61_56, 0.56_22, 0.48_95, 0.66_61, 0.38_04, 0.57_48, 0.61_36, 0.50_14] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a__ ( self ) -> int:
lowercase : List[str] = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowercase : Dict = self.get_inputs(a_ )
lowercase : Any = pipe(**a_ ).images
lowercase : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 2_5_6, 2_5_6, 3)
lowercase : List[str] = np.array([0.5_18_25, 0.5_28_50, 0.5_25_43, 0.5_42_58, 0.5_23_04, 0.5_25_69, 0.5_43_63, 0.5_52_76, 0.5_68_78] )
lowercase : str = np.abs(expected_slice - image_slice ).max()
assert max_diff < 1e-3
@nightly
@require_torch_gpu
class _UpperCamelCase ( unittest.TestCase):
'''simple docstring'''
    def tearDown(self):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def get_inputs(self, device, dtype=torch.float32, seed=0):
        generator = torch.manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 32, 32))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
"prompt": "A painting of a squirrel eating a burger",
"latents": latents,
"generator": generator,
"num_inference_steps": 5_0,
"guidance_scale": 6.0,
"output_type": "numpy",
}
return inputs
def a__ ( self ) -> int:
lowercase : Dict = LDMTextToImagePipeline.from_pretrained("CompVis/ldm-text2im-large-256" ).to(a_ )
pipe.set_progress_bar_config(disable=a_ )
lowercase : str = self.get_inputs(a_ )
lowercase : str = pipe(**a_ ).images[0]
lowercase : Dict = load_numpy(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/ldm_text2img/ldm_large_256_ddim.npy" )
lowercase : Any = np.abs(expected_image - image ).max()
assert max_diff < 1e-3
| 425 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
from unittest import TestCase
from transformers import BartTokenizer, BartTokenizerFast, DPRQuestionEncoderTokenizer, DPRQuestionEncoderTokenizerFast
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_tokenizers, require_torch, slow
from transformers.utils import is_datasets_available, is_faiss_available, is_torch_available
if is_torch_available() and is_datasets_available() and is_faiss_available():
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.tokenization_rag import RagTokenizer
@require_faiss
@require_torch
class RagTokenizerTest(TestCase):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        self.retrieval_vector_size = 8
# DPR tok
        vocab_tokens = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
        dpr_tokenizer_path = os.path.join(self.tmpdirname, "dpr_tokenizer")
        os.makedirs(dpr_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(dpr_tokenizer_path, DPR_VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
        vocab = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        bart_tokenizer_path = os.path.join(self.tmpdirname, "bart_tokenizer")
        os.makedirs(bart_tokenizer_path, exist_ok=True)
        self.vocab_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(bart_tokenizer_path, BART_VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))
    def get_dpr_tokenizer(self) -> DPRQuestionEncoderTokenizer:
        return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname, "dpr_tokenizer"))
    def get_bart_tokenizer(self) -> BartTokenizer:
        return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname, "bart_tokenizer"))
    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
@require_tokenizers
    def test_save_load_pretrained_with_saved_config(self):
        save_dir = os.path.join(self.tmpdirname, "rag_tokenizer")
        rag_config = RagConfig(question_encoder=DPRConfig().to_dict(), generator=BartConfig().to_dict())
        rag_tokenizer = RagTokenizer(question_encoder=self.get_dpr_tokenizer(), generator=self.get_bart_tokenizer())
        rag_config.save_pretrained(save_dir)
        rag_tokenizer.save_pretrained(save_dir)
        new_rag_tokenizer = RagTokenizer.from_pretrained(save_dir, config=rag_config)
        self.assertIsInstance(new_rag_tokenizer.question_encoder, DPRQuestionEncoderTokenizerFast)
        self.assertEqual(new_rag_tokenizer.question_encoder.get_vocab(), rag_tokenizer.question_encoder.get_vocab())
        self.assertIsInstance(new_rag_tokenizer.generator, BartTokenizerFast)
        self.assertEqual(new_rag_tokenizer.generator.get_vocab(), rag_tokenizer.generator.get_vocab())
@slow
    def test_pretrained_token_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
        input_dict = tokenizer(input_strings)
        self.assertIsNotNone(input_dict)
@slow
    def test_pretrained_sequence_nq_tokenizer(self):
        tokenizer = RagTokenizer.from_pretrained("facebook/rag-sequence-nq")
        input_strings = [
"who got the first nobel prize in physics",
"when is the next deadpool movie being released",
"which mode is used for short wave broadcast service",
"who is the owner of reading football club",
"when is the next scandal episode coming out",
"when is the last time the philadelphia won the superbowl",
"what is the most current adobe flash player version",
"how many episodes are there in dragon ball z",
"what is the first step in the evolution of the eye",
"where is gall bladder situated in human body",
"what is the main mineral in lithium batteries",
"who is the president of usa right now",
"where do the greasers live in the outsiders",
"panda is a national animal of which country",
"what is the name of manchester united stadium",
]
lowercase : Dict = tokenizer(a_ )
self.assertIsNotNone(a_ )
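# Usage sketch mirroring the two slow tests above (a minimal sketch; requires
# network access to download the pretrained checkpoint):
#   tokenizer = RagTokenizer.from_pretrained("facebook/rag-token-nq")
#   batch = tokenizer(["who got the first nobel prize in physics"])
#   # -> a BatchEncoding produced by the DPR question-encoder tokenizer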
| 425 | 1 |
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__a = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase=False , ):
'''simple docstring'''
output_path.parent.mkdir(parents=_lowercase , exist_ok=_lowercase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_lowercase , _lowercase , f=output_path.as_posix() , input_names=_lowercase , output_names=_lowercase , dynamic_axes=_lowercase , do_constant_folding=_lowercase , use_external_data_format=_lowercase , enable_onnx_checker=_lowercase , opset_version=_lowercase , )
else:
export(
_lowercase , _lowercase , f=output_path.as_posix() , input_names=_lowercase , output_names=_lowercase , dynamic_axes=_lowercase , do_constant_folding=_lowercase , opset_version=_lowercase , )
@torch.no_grad()
def lowerCamelCase__ ( _lowercase , _lowercase , _lowercase , _lowercase = False ):
'''simple docstring'''
UpperCAmelCase_ : Any = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
UpperCAmelCase_ : Tuple = '''cuda'''
elif fpaa and not torch.cuda.is_available():
raise ValueError('''`float16` model export is only supported on GPUs with CUDA''' )
else:
UpperCAmelCase_ : Optional[int] = '''cpu'''
UpperCAmelCase_ : List[str] = Path(_lowercase )
# VAE DECODER
UpperCAmelCase_ : Optional[Any] = AutoencoderKL.from_pretrained(model_path + '''/vae''' )
UpperCAmelCase_ : Union[str, Any] = vae_decoder.config.latent_channels
# forward only through the decoder part
UpperCAmelCase_ : Optional[Any] = vae_decoder.decode
onnx_export(
_lowercase , model_args=(
torch.randn(1 , _lowercase , 25 , 25 ).to(device=_lowercase , dtype=_lowercase ),
False,
) , output_path=output_path / '''vae_decoder''' / '''model.onnx''' , ordered_input_names=['''latent_sample''', '''return_dict'''] , output_names=['''sample'''] , dynamic_axes={
'''latent_sample''': {0: '''batch''', 1: '''channels''', 2: '''height''', 3: '''width'''},
} , opset=_lowercase , )
del vae_decoder
if __name__ == "__main__":
__a = argparse.ArgumentParser()
parser.add_argument(
'--model_path',
type=str,
required=True,
help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
)
parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
parser.add_argument(
'--opset',
default=14,
type=int,
help='The version of the ONNX operator set to use.',
)
parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
__a = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
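    # Loading the exported decoder afterwards is possible with ONNX Runtime
    # (a minimal sketch under assumptions: `onnxruntime` is installed, only
    # `latent_sample` survives as a graph input after constant folding, and
    # the VAE uses the usual 4 latent channels):
    #   import numpy as np, onnxruntime as ort
    #   sess = ort.InferenceSession(args.output_path + "/vae_decoder/model.onnx")
    #   latent = np.random.randn(1, 4, 25, 25).astype(np.float32)
    #   (sample,) = sess.run(None, {"latent_sample": latent})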
    print('SD: Done: ONNX')
| 30 |
from typing import List, Optional, Union
from ...image_utils import ImageInput
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class lowercase__ ( _UpperCAmelCase ):
A__ : Tuple =["""image_processor""", """tokenizer"""]
A__ : Dict ="""BlipImageProcessor"""
A__ : str ="""AutoTokenizer"""
def __init__( self : Optional[Any] , UpperCAmelCase_ : List[Any] , UpperCAmelCase_ : int ):
SCREAMING_SNAKE_CASE__ = False
super().__init__(UpperCAmelCase_ , UpperCAmelCase_ )
SCREAMING_SNAKE_CASE__ = self.image_processor
def __call__( self : Optional[int] , UpperCAmelCase_ : ImageInput = None , UpperCAmelCase_ : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Union[bool, str, PaddingStrategy] = False , UpperCAmelCase_ : Union[bool, str, TruncationStrategy] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 0 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[bool] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : Optional[Union[str, TensorType]] = None , **UpperCAmelCase_ : int , ):
if images is None and text is None:
raise ValueError('You have to specify either images or text.' )
# Get only text
if images is None:
SCREAMING_SNAKE_CASE__ = self.tokenizer
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
return text_encoding
# add pixel_values
SCREAMING_SNAKE_CASE__ = self.image_processor(UpperCAmelCase_ , return_tensors=UpperCAmelCase_ )
if text is not None:
SCREAMING_SNAKE_CASE__ = self.tokenizer(
text=UpperCAmelCase_ , add_special_tokens=UpperCAmelCase_ , padding=UpperCAmelCase_ , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ , stride=UpperCAmelCase_ , pad_to_multiple_of=UpperCAmelCase_ , return_attention_mask=UpperCAmelCase_ , return_overflowing_tokens=UpperCAmelCase_ , return_special_tokens_mask=UpperCAmelCase_ , return_offsets_mapping=UpperCAmelCase_ , return_token_type_ids=UpperCAmelCase_ , return_length=UpperCAmelCase_ , verbose=UpperCAmelCase_ , return_tensors=UpperCAmelCase_ , **UpperCAmelCase_ , )
else:
SCREAMING_SNAKE_CASE__ = None
if text_encoding is not None:
encoding_image_processor.update(UpperCAmelCase_ )
return encoding_image_processor
def A_ ( self : Union[str, Any] , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[str] ):
return self.tokenizer.batch_decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
def A_ ( self : Any , *UpperCAmelCase_ : Any , **UpperCAmelCase_ : List[Any] ):
return self.tokenizer.decode(*UpperCAmelCase_ , **UpperCAmelCase_ )
@property
# Copied from transformers.models.blip.processing_blip.BlipProcessor.model_input_names
def A_ ( self : int ):
SCREAMING_SNAKE_CASE__ = self.tokenizer.model_input_names
SCREAMING_SNAKE_CASE__ = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
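# Usage sketch for the processor defined above (it pairs a BlipImageProcessor
# with an AutoTokenizer; the call below is illustrative, not from the sample):
#   inputs = processor(images=pil_image, text="a photo of a cat", return_tensors="pt")
#   # -> "pixel_values" from the image processor plus "input_ids"/"attention_mask"
#   #    from the tokenizer, merged into a single encoding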
| 472 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
__A : Dict = logging.get_logger(__name__)
__A : List[Any] = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt', 'tokenizer_file': 'tokenizer.json'}
__A : Optional[Any] = {
'vocab_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json',
},
'merges_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt',
},
'tokenizer_file': {
'allenai/led-base-16384': 'https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json',
},
}
__A : Union[str, Any] = {
'allenai/led-base-16384': 1_63_84,
}
class _SCREAMING_SNAKE_CASE ( lowerCAmelCase__):
_UpperCamelCase:Optional[int] = VOCAB_FILES_NAMES
_UpperCamelCase:str = PRETRAINED_VOCAB_FILES_MAP
_UpperCamelCase:int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCamelCase:Union[str, Any] = LEDTokenizer
_UpperCamelCase:Dict = ["input_ids", "attention_mask"]
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="replace" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<mask>" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , )-> Union[str, Any]:
super().__init__(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , errors=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , add_prefix_space=_SCREAMING_SNAKE_CASE , trim_offsets=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
lowerCamelCase_ =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowerCamelCase_ =getattr(_SCREAMING_SNAKE_CASE , pre_tok_state.pop("""type""" ) )
lowerCamelCase_ =add_prefix_space
lowerCamelCase_ =pre_tok_class(**_SCREAMING_SNAKE_CASE )
lowerCamelCase_ =add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
lowerCamelCase_ ="""post_processor"""
lowerCamelCase_ =getattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if tokenizer_component_instance:
lowerCamelCase_ =json.loads(tokenizer_component_instance.__getstate__() )
        # The lists 'sep' and 'cls' must be cast to tuples for the object `post_processor_class`
if "sep" in state:
lowerCamelCase_ =tuple(state["""sep"""] )
if "cls" in state:
lowerCamelCase_ =tuple(state["""cls"""] )
lowerCamelCase_ =False
if state.get("""add_prefix_space""" , _SCREAMING_SNAKE_CASE ) != add_prefix_space:
lowerCamelCase_ =add_prefix_space
lowerCamelCase_ =True
if state.get("""trim_offsets""" , _SCREAMING_SNAKE_CASE ) != trim_offsets:
lowerCamelCase_ =trim_offsets
lowerCamelCase_ =True
if changes_to_apply:
lowerCamelCase_ =getattr(_SCREAMING_SNAKE_CASE , state.pop("""type""" ) )
lowerCamelCase_ =component_class(**_SCREAMING_SNAKE_CASE )
setattr(self.backend_tokenizer , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _snake_case ( self )-> str:
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _snake_case ( self , _SCREAMING_SNAKE_CASE )-> Dict:
lowerCamelCase_ =AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else value
lowerCamelCase_ =value
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> BatchEncoding:
lowerCamelCase_ =kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )-> BatchEncoding:
lowerCamelCase_ =kwargs.get("""is_split_into_words""" , _SCREAMING_SNAKE_CASE )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> Tuple[str]:
lowerCamelCase_ =self._tokenizer.model.save(_SCREAMING_SNAKE_CASE , name=_SCREAMING_SNAKE_CASE )
return tuple(_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None )-> Optional[Any]:
lowerCamelCase_ =[self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None )-> List[int]:
lowerCamelCase_ =[self.sep_token_id]
lowerCamelCase_ =[self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = PaddingStrategy.DO_NOT_PAD , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , )-> dict:
lowerCamelCase_ =super()._pad(
encoded_inputs=_SCREAMING_SNAKE_CASE , max_length=_SCREAMING_SNAKE_CASE , padding_strategy=_SCREAMING_SNAKE_CASE , pad_to_multiple_of=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , )
# Load from model defaults
if return_attention_mask is None:
lowerCamelCase_ ="""attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
lowerCamelCase_ =encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
lowerCamelCase_ =len(encoded_inputs["""global_attention_mask"""] ) != len(_SCREAMING_SNAKE_CASE )
if needs_to_be_padded:
lowerCamelCase_ =len(_SCREAMING_SNAKE_CASE ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
lowerCamelCase_ =(
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
lowerCamelCase_ =[-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs
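# Illustration of the `global_attention_mask` padding implemented in `_pad`
# above (values are hypothetical):
#   before: {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
#   after right-padding to length 5 the mask becomes [1, 0, 0, -1, -1],
#   since -1 marks padded positions while 0 already means "local attention".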
| 75 |
# Imports
import numpy as np
class _SCREAMING_SNAKE_CASE :
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Any:
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
if red is not None:
lowerCamelCase_ =red
if green is not None:
lowerCamelCase_ =green
if blue is not None:
lowerCamelCase_ =blue
if red_edge is not None:
lowerCamelCase_ =red_edge
if nir is not None:
lowerCamelCase_ =nir
return True
def _snake_case ( self , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
self.set_matricies(red=_SCREAMING_SNAKE_CASE , green=_SCREAMING_SNAKE_CASE , blue=_SCREAMING_SNAKE_CASE , red_edge=_SCREAMING_SNAKE_CASE , nir=_SCREAMING_SNAKE_CASE )
lowerCamelCase_ ={
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
def _snake_case ( self )-> Optional[Any]:
return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
def _snake_case ( self )-> Tuple:
return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
(self.nir - self.red) / (self.nir + self.red)
)
def _snake_case ( self )-> str:
return self.nir * (self.red / (self.green**2))
def _snake_case ( self )-> Optional[int]:
return (2 * self.green - self.red - self.blue) / (
2 * self.green + self.red + self.blue
)
def _snake_case ( self )-> Tuple:
return (self.nir - self.red) / (self.nir + self.red)
def _snake_case ( self )-> Dict:
return (self.nir - self.blue) / (self.nir + self.blue)
def _snake_case ( self )-> List[Any]:
return (self.redEdge - self.red) / (self.redEdge + self.red)
def _snake_case ( self )-> Tuple:
return (self.nir - self.green) / (self.nir + self.green)
def _snake_case ( self )-> Optional[int]:
return (self.nir - (self.green + self.blue)) / (
self.nir + (self.green + self.blue)
)
def _snake_case ( self )-> List[str]:
return (self.nir - (self.green + self.red)) / (
self.nir + (self.green + self.red)
)
def _snake_case ( self )-> List[str]:
return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
def _snake_case ( self )-> Optional[int]:
return (self.nir - (self.green + self.red + self.blue)) / (
self.nir + (self.green + self.red + self.blue)
)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.0_8 , _SCREAMING_SNAKE_CASE=1.2_2 , _SCREAMING_SNAKE_CASE=0.0_3 )-> Any:
return a * (
(self.nir - a * self.red - b)
/ (a * self.nir + self.red - a * b + x * (1 + a**2))
)
def _snake_case ( self )-> Tuple:
return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
def _snake_case ( self )-> Any:
return (self.nir / self.green) - 1
def _snake_case ( self )-> Union[str, Any]:
return (self.nir / self.redEdge) - 1
def _snake_case ( self )-> Union[str, Any]:
return (self.red - self.blue) / self.red
def _snake_case ( self )-> Dict:
lowerCamelCase_ =self.ndvi()
return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
def _snake_case ( self )-> int:
return self.nir - self.green
def _snake_case ( self )-> Dict:
return 2.5 * (
(self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
)
def _snake_case ( self )-> List[str]:
lowerCamelCase_ =(2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
self.nir + self.red + 0.5
)
return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.1_6 )-> List[Any]:
return (self.nir - self.green) / (self.nir + self.green + y)
def _snake_case ( self , _SCREAMING_SNAKE_CASE=0.5 )-> Dict:
return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
def _snake_case ( self )-> int:
return np.arctan(
((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
def _snake_case ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None )-> Union[str, Any]:
return (self.nir - b) / (a * self.red)
def _snake_case ( self )-> int:
return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
def _snake_case ( self )-> Optional[Any]:
return (self.red + self.green + self.blue) / 3_0.5
def _snake_case ( self )-> List[str]:
return self.nir / self.red
def _snake_case ( self )-> List[str]:
return (self.rvi() - 1) / (self.rvi() + 1)
def _snake_case ( self )-> str:
return (
(2 * self.nir + 1)
- ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
) / 2
def _snake_case ( self )-> List[Any]:
return self.green / (self.nir + self.red + self.green)
def _snake_case ( self )-> Dict:
return self.nir / (self.nir + self.red + self.green)
def _snake_case ( self )-> List[str]:
return self.red / (self.nir + self.red + self.green)
def _snake_case ( self )-> int:
return (self.green - self.red) / (self.green + self.red)
def _snake_case ( self )-> str:
return (self.red - self.green) / (self.red + self.green)
def _snake_case ( self )-> str:
lowerCamelCase_ =np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
lowerCamelCase_ =np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
return (max_value - min_value) / max_value
def _snake_case ( self )-> List[str]:
return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
def _snake_case ( self )-> List[Any]:
return self.nir / self.red
def _snake_case ( self )-> Optional[int]:
return (self.ndvi() + 0.5) ** (1 / 2)
def _snake_case ( self )-> str:
return (self.nir - self.redEdge) / (self.nir + self.redEdge)
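# Standalone worked example of the NDVI formula used above (a minimal sketch;
# it does not depend on the class, only on numpy):
import numpy as np

red_band = np.array([[0.1, 0.2], [0.3, 0.4]])
nir_band = np.array([[0.5, 0.6], [0.7, 0.8]])
ndvi_map = (nir_band - red_band) / (nir_band + red_band)
# top-left cell: (0.5 - 0.1) / (0.5 + 0.1) = 0.666..., i.e. dense vegetation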
| 75 | 1 |
import os
from bleurt import score # From: git+https://github.com/google-research/bleurt.git
import datasets
lowerCamelCase =datasets.logging.get_logger(__name__)
lowerCamelCase ="\\n@inproceedings{bleurt,\n title={BLEURT: Learning Robust Metrics for Text Generation},\n author={Thibault Sellam and Dipanjan Das and Ankur P. Parikh},\n booktitle={ACL},\n year={2020},\n url={https://arxiv.org/abs/2004.04696}\n}\n"
lowerCamelCase ="\\nBLEURT a learnt evaluation metric for Natural Language Generation. It is built using multiple phases of transfer learning starting from a pretrained BERT model (Devlin et al. 2018)\nand then employing another pre-training phrase using synthetic data. Finally it is trained on WMT human annotations. You may run BLEURT out-of-the-box or fine-tune\nit for your specific application (the latter is expected to perform better).\n\nSee the project's README at https://github.com/google-research/bleurt#readme for more information.\n"
lowerCamelCase ="\nBLEURT score.\n\nArgs:\n `predictions` (list of str): prediction/candidate sentences\n `references` (list of str): reference sentences\n `checkpoint` BLEURT checkpoint. Will default to BLEURT-tiny if None.\n\nReturns:\n 'scores': List of scores.\nExamples:\n\n >>> predictions = [\"hello there\", \"general kenobi\"]\n >>> references = [\"hello there\", \"general kenobi\"]\n >>> bleurt = datasets.load_metric(\"bleurt\")\n >>> results = bleurt.compute(predictions=predictions, references=references)\n >>> print([round(v, 2) for v in results[\"scores\"]])\n [1.03, 1.04]\n"
lowerCamelCase ={
"bleurt-tiny-128": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-128.zip",
"bleurt-tiny-512": "https://storage.googleapis.com/bleurt-oss/bleurt-tiny-512.zip",
"bleurt-base-128": "https://storage.googleapis.com/bleurt-oss/bleurt-base-128.zip",
"bleurt-base-512": "https://storage.googleapis.com/bleurt-oss/bleurt-base-512.zip",
"bleurt-large-128": "https://storage.googleapis.com/bleurt-oss/bleurt-large-128.zip",
"bleurt-large-512": "https://storage.googleapis.com/bleurt-oss/bleurt-large-512.zip",
"BLEURT-20-D3": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D3.zip",
"BLEURT-20-D6": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D6.zip",
"BLEURT-20-D12": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20-D12.zip",
"BLEURT-20": "https://storage.googleapis.com/bleurt-oss-21/BLEURT-20.zip",
}
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
"""simple docstring"""
def __SCREAMING_SNAKE_CASE ( self ) -> Dict:
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='''https://github.com/google-research/bleurt''' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Value('''string''' , id='''sequence''' ),
'''references''': datasets.Value('''string''' , id='''sequence''' ),
} ) , codebase_urls=['''https://github.com/google-research/bleurt'''] , reference_urls=['''https://github.com/google-research/bleurt''', '''https://arxiv.org/abs/2004.04696'''] , )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE ) -> str:
"""simple docstring"""
if self.config_name == "default":
logger.warning(
'''Using default BLEURT-Base checkpoint for sequence maximum length 128. '''
'''You can use a bigger model for better results with e.g.: datasets.load_metric(\'bleurt\', \'bleurt-large-512\').''' )
UpperCamelCase__ : str = '''bleurt-base-128'''
if self.config_name.lower() in CHECKPOINT_URLS:
UpperCamelCase__ : Dict = self.config_name.lower()
elif self.config_name.upper() in CHECKPOINT_URLS:
UpperCamelCase__ : Any = self.config_name.upper()
else:
raise KeyError(
F'''{self.config_name} model not found. You should supply the name of a model checkpoint for bleurt in {CHECKPOINT_URLS.keys()}''' )
# download the model checkpoint specified by self.config_name and set up the scorer
UpperCamelCase__ : Any = dl_manager.download_and_extract(CHECKPOINT_URLS[checkpoint_name] )
UpperCamelCase__ : List[Any] = score.BleurtScorer(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) )
def __SCREAMING_SNAKE_CASE ( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
"""simple docstring"""
UpperCamelCase__ : Optional[Any] = self.scorer.score(references=__SCREAMING_SNAKE_CASE , candidates=__SCREAMING_SNAKE_CASE )
return {"scores": scores}
| 285 |
from math import factorial, pi
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int = 30 ):
if not isinstance(UpperCAmelCase_ , (int, float) ):
raise ValueError('''maclaurin_sin() requires either an int or float for theta''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or accuracy <= 0:
raise ValueError('''maclaurin_sin() requires a positive int for accuracy''' )
lowercase : Tuple = float(UpperCAmelCase_ )
lowercase : List[str] = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(UpperCAmelCase_ ) )
def lowerCamelCase_ ( UpperCAmelCase_ : float , UpperCAmelCase_ : int = 30 ):
if not isinstance(UpperCAmelCase_ , (int, float) ):
raise ValueError('''maclaurin_cos() requires either an int or float for theta''' )
if not isinstance(UpperCAmelCase_ , UpperCAmelCase_ ) or accuracy <= 0:
raise ValueError('''maclaurin_cos() requires a positive int for accuracy''' )
lowercase : Optional[Any] = float(UpperCAmelCase_ )
lowercase : Dict = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(UpperCAmelCase_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
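    # Sanity check against the math module (hedged: assumes the two functions
    # above are exposed as maclaurin_sin/maclaurin_cos; in this sample both
    # defs share one placeholder name, so the second shadows the first):
    #   from math import sin, cos
    #   assert abs(maclaurin_sin(1.0) - sin(1.0)) < 1e-10
    #   assert abs(maclaurin_cos(1.0) - cos(1.0)) < 1e-10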
| 583 | 0 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class a_ ( snake_case_ , unittest.TestCase ):
'''simple docstring'''
lowerCamelCase__ : int = KandinskyVaaControlnetImgaImgPipeline
lowerCamelCase__ : Optional[int] = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : Dict = ['image_embeds', 'negative_image_embeds', 'image', 'hint']
lowerCamelCase__ : str = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
lowerCamelCase__ : Any = False
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return 3_2
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim
@property
def a__ (self ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def a__ (self ):
'''simple docstring'''
return 1_0_0
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[int] = {
'in_channels': 8,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image_hint',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
lowerCamelCase__ : int = UNetaDConditionModel(**lowerCamelCase_ )
return model
@property
def a__ (self ):
'''simple docstring'''
return {
"block_out_channels": [3_2, 3_2, 6_4, 6_4],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def a__ (self ):
'''simple docstring'''
torch.manual_seed(0 )
lowerCamelCase__ : Optional[Any] = VQModel(**self.dummy_movq_kwargs )
return model
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = self.dummy_unet
lowerCamelCase__ : List[Any] = self.dummy_movq
lowerCamelCase__ : Tuple = {
'num_train_timesteps': 1_0_0_0,
'beta_schedule': 'linear',
'beta_start': 0.00_085,
'beta_end': 0.012,
'clip_sample': False,
'set_alpha_to_one': False,
'steps_offset': 0,
'prediction_type': 'epsilon',
'thresholding': False,
}
lowerCamelCase__ : Optional[Any] = DDIMScheduler(**lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def a__ (self, lowerCamelCase_, lowerCamelCase_=0 ):
'''simple docstring'''
lowerCamelCase__ : List[Any] = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : int = floats_tensor((1, self.text_embedder_hidden_size), rng=random.Random(seed + 1 ) ).to(
lowerCamelCase_ )
# create init_image
lowerCamelCase__ : Any = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
lowerCamelCase__ : Dict = image.cpu().permute(0, 2, 3, 1 )[0]
lowerCamelCase__ : Optional[Any] = Image.fromarray(np.uinta(lowerCamelCase_ ) ).convert('RGB' ).resize((2_5_6, 2_5_6) )
# create hint
lowerCamelCase__ : Dict = floats_tensor((1, 3, 6_4, 6_4), rng=random.Random(lowerCamelCase_ ) ).to(lowerCamelCase_ )
if str(lowerCamelCase_ ).startswith('mps' ):
lowerCamelCase__ : int = torch.manual_seed(lowerCamelCase_ )
else:
lowerCamelCase__ : Any = torch.Generator(device=lowerCamelCase_ ).manual_seed(lowerCamelCase_ )
lowerCamelCase__ : Optional[Any] = {
'image': init_image,
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'hint': hint,
'generator': generator,
'height': 6_4,
'width': 6_4,
'num_inference_steps': 1_0,
'guidance_scale': 7.0,
'strength': 0.2,
'output_type': 'np',
}
return inputs
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : List[str] = 'cpu'
lowerCamelCase__ : List[Any] = self.get_dummy_components()
lowerCamelCase__ : List[Any] = self.pipeline_class(**lowerCamelCase_ )
lowerCamelCase__ : Dict = pipe.to(lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : Any = pipe(**self.get_dummy_inputs(lowerCamelCase_ ) )
lowerCamelCase__ : List[Any] = output.images
lowerCamelCase__ : str = pipe(
**self.get_dummy_inputs(lowerCamelCase_ ), return_dict=lowerCamelCase_, )[0]
lowerCamelCase__ : int = image[0, -3:, -3:, -1]
lowerCamelCase__ : Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
lowerCamelCase__ : List[str] = np.array(
[0.54_985_034, 0.55_509_365, 0.52_561_504, 0.5_570_494, 0.5_593_818, 0.5_263_979, 0.50_285_643, 0.5_069_846, 0.51_196_736] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def a__ (self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a__ (self ):
'''simple docstring'''
lowerCamelCase__ : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_controlnet_img2img_robotcat_fp16.npy' )
lowerCamelCase__ : Any = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/kandinsky/cat.png' )
lowerCamelCase__ : Any = init_image.resize((5_1_2, 5_1_2) )
lowerCamelCase__ : List[str] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/hint_image_cat.png' )
lowerCamelCase__ : Any = torch.from_numpy(np.array(lowerCamelCase_ ) ).float() / 255.0
lowerCamelCase__ : Optional[int] = hint.permute(2, 0, 1 ).unsqueeze(0 )
lowerCamelCase__ : Union[str, Any] = 'A robot, 4k photo'
lowerCamelCase__ : Any = KandinskyVaaPriorEmbaEmbPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior', torch_dtype=torch.floataa )
pipe_prior.to(lowerCamelCase_ )
lowerCamelCase__ : List[Any] = KandinskyVaaControlnetImgaImgPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-controlnet-depth', torch_dtype=torch.floataa )
lowerCamelCase__ : int = pipeline.to(lowerCamelCase_ )
pipeline.set_progress_bar_config(disable=lowerCamelCase_ )
lowerCamelCase__ : str = torch.Generator(device='cpu' ).manual_seed(0 )
lowerCamelCase__ : Optional[Any] = pipe_prior(
lowerCamelCase_, image=lowerCamelCase_, strength=0.85, generator=lowerCamelCase_, negative_prompt='', ).to_tuple()
lowerCamelCase__ : Union[str, Any] = pipeline(
image=lowerCamelCase_, image_embeds=lowerCamelCase_, negative_image_embeds=lowerCamelCase_, hint=lowerCamelCase_, generator=lowerCamelCase_, num_inference_steps=1_0_0, height=5_1_2, width=5_1_2, strength=0.5, output_type='np', )
lowerCamelCase__ : Dict = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert_mean_pixel_difference(lowerCamelCase_, lowerCamelCase_ )
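# Condensed view of the two-stage flow exercised by the slow test above:
#   1. the prior pipeline maps (prompt, init image) to image embeddings;
#   2. the controlnet img2img pipeline denoises from the init image,
#      conditioned on those embeddings and the depth hint, where `strength`
#      controls how much of the init image survives (0.5 here, 0.85 in the prior).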
| 714 |
"""simple docstring"""
def lowerCamelCase_ ( _lowerCamelCase , _lowerCamelCase ):
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(_lowerCamelCase ) * abs(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
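    # Worked example: a 10 kg body moving at 10 m/s carries
    # 0.5 * 10 * |10| * |10| = 500 J; the abs() above makes direction irrelevant.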
| 696 | 0 |
import functools
from typing import Any
def _lowercase ( __UpperCamelCase : str , __UpperCamelCase : list[str] ):
# Validation
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or len(__UpperCamelCase ) == 0:
raise ValueError("""the string should be not empty string""" )
if not isinstance(__UpperCamelCase , __UpperCamelCase ) or not all(
isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) > 0 for item in words ):
raise ValueError("""the words should be a list of non-empty strings""" )
# Build trie
snake_case__ = {}
snake_case__ = """WORD_KEEPER"""
for word in words:
snake_case__ = trie
for c in word:
if c not in trie_node:
snake_case__ = {}
snake_case__ = trie_node[c]
snake_case__ = True
snake_case__ = len(__UpperCamelCase )
# Dynamic programming method
@functools.cache
def is_breakable(__UpperCamelCase : int ) -> bool:
if index == len_string:
return True
snake_case__ = trie
for i in range(__UpperCamelCase , __UpperCamelCase ):
snake_case__ = trie_node.get(string[i] , __UpperCamelCase )
if trie_node is None:
return False
if trie_node.get(__UpperCamelCase , __UpperCamelCase ) and is_breakable(i + 1 ):
return True
return False
return is_breakable(0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
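# A clean, self-contained re-sketch of the same memoised-DP word-break check
# (the sample above uses obfuscated placeholder names; this version swaps the
# nested-dict trie for a plain set lookup, which is enough for a sketch):
import functools


def word_break_sketch(string: str, words: list[str]) -> bool:
    word_set = set(words)

    @functools.cache
    def breakable(i: int) -> bool:
        # index == len(string) means every character has been consumed
        return i == len(string) or any(
            string.startswith(w, i) and breakable(i + len(w)) for w in word_set
        )

    return breakable(0)


# word_break_sketch("applepenapple", ["apple", "pen"]) -> True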
| 214 |
from math import factorial
class SCREAMING_SNAKE_CASE__ :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Tuple:
snake_case__ = real
if isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case__ = [1] * rank
else:
snake_case__ = rank
def __repr__( self : int ) -> Union[str, Any]:
return (
f'''{self.real}+'''
f'''{'+'.join(str(lowerCAmelCase__ )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}'''
)
def UpperCAmelCase_ ( self : str ) -> Dict:
snake_case__ = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , lowerCAmelCase__ )
def __add__( self : List[Any] , lowerCAmelCase__ : Optional[Any] ) -> Union[str, Any]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
return Dual(self.real + other , self.duals )
snake_case__ = self.duals.copy()
snake_case__ = other.duals.copy()
if len(lowerCAmelCase__ ) > len(lowerCAmelCase__ ):
o_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
elif len(lowerCAmelCase__ ) < len(lowerCAmelCase__ ):
s_dual.extend([1] * (len(lowerCAmelCase__ ) - len(lowerCAmelCase__ )) )
snake_case__ = []
for i in range(len(lowerCAmelCase__ ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , lowerCAmelCase__ )
UpperCamelCase__ : int = __add__
def __sub__( self : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
return self + other * -1
def __mul__( self : Tuple , lowerCAmelCase__ : List[str] ) -> str:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case__ = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , lowerCAmelCase__ )
snake_case__ = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , lowerCAmelCase__ )
UpperCamelCase__ : int = __mul__
def __truediv__( self : Dict , lowerCAmelCase__ : Tuple ) -> List[str]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case__ = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , lowerCAmelCase__ )
raise ValueError
def __floordiv__( self : int , lowerCAmelCase__ : Union[str, Any] ) -> Optional[Any]:
if not isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case__ = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , lowerCAmelCase__ )
raise ValueError
def __pow__( self : int , lowerCAmelCase__ : Optional[int] ) -> int:
if n < 0 or isinstance(lowerCAmelCase__ , lowerCAmelCase__ ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
snake_case__ = self
for _ in range(n - 1 ):
x *= self
return x
def _lowercase ( __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : int ):
if not callable(__UpperCamelCase ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(__UpperCamelCase , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError("""differentiate() requires an int as input for order""" )
snake_case__ = Dual(__UpperCamelCase , 1 )
snake_case__ = func(__UpperCamelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(__UpperCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def _lowercase ( __UpperCamelCase : Optional[Any] ):
return y**2 * y**4
print(differentiate(f, 9, 2))
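# Sanity check for the call above: f(y) = y**2 * y**4 = y**6, whose second
# derivative is 30 * y**4, so differentiate(f, 9, 2) should print
# 30 * 9**4 = 196830.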
| 214 | 1 |
def lowerCamelCase ( UpperCAmelCase_ : Tuple , UpperCAmelCase_ : Union[str, Any] , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Any )-> int:
"""simple docstring"""
a =[False] * len(UpperCAmelCase_ )
a =[]
queue.append(UpperCAmelCase_ )
a =True
while queue:
a =queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(UpperCAmelCase_ )
a =True
a =u
return visited[t]
def lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : List[str] , UpperCAmelCase_ : Any )-> Optional[int]:
"""simple docstring"""
a =[-1] * (len(UpperCAmelCase_ ))
a =0
while bfs(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ ):
a =float("""Inf""" )
a =sink
while s != source:
# Find the minimum value in select path
a =min(UpperCAmelCase_ , graph[parent[s]][s] )
a =parent[s]
max_flow += path_flow
a =sink
while v != source:
a =parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
a =parent[v]
return max_flow
_lowerCamelCase = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
_lowerCamelCase , _lowerCamelCase = 0, 5
print(ford_fulkerson(graph, source, sink))
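# For the classic 6-node network above (the CLRS example, source 0, sink 5),
# the script should print a maximum flow of 23.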
| 713 |
import inspect
import unittest
from transformers import ViTMSNConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMSNForImageClassification, ViTMSNModel
from transformers.models.vit_msn.modeling_vit_msn import VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class UpperCAmelCase__ :
'''simple docstring'''
def __init__( self , _lowerCAmelCase , _lowerCAmelCase=13 , _lowerCAmelCase=30 , _lowerCAmelCase=2 , _lowerCAmelCase=3 , _lowerCAmelCase=True , _lowerCAmelCase=True , _lowerCAmelCase=32 , _lowerCAmelCase=5 , _lowerCAmelCase=4 , _lowerCAmelCase=37 , _lowerCAmelCase="gelu" , _lowerCAmelCase=0.1 , _lowerCAmelCase=0.1 , _lowerCAmelCase=10 , _lowerCAmelCase=0.02 , _lowerCAmelCase=None , ):
a =parent
a =batch_size
a =image_size
a =patch_size
a =num_channels
a =is_training
a =use_labels
a =hidden_size
a =num_hidden_layers
a =num_attention_heads
a =intermediate_size
a =hidden_act
a =hidden_dropout_prob
a =attention_probs_dropout_prob
a =type_sequence_label_size
a =initializer_range
a =scope
# in ViT MSN, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
a =(image_size // patch_size) ** 2
a =num_patches + 1
def lowerCAmelCase__ ( self ):
a =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
a =None
if self.use_labels:
a =ids_tensor([self.batch_size] , self.type_sequence_label_size )
a =self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ ( self ):
return ViTMSNConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =ViTMSNModel(config=_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =model(_lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ ( self , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase ):
a =self.type_sequence_label_size
a =ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =model(_lowerCAmelCase , labels=_lowerCAmelCase )
print("""Pixel and labels shape: {pixel_values.shape}, {labels.shape}""" )
print("""Labels: {labels}""" )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
a =1
a =ViTMSNForImageClassification(_lowerCAmelCase )
model.to(_lowerCAmelCase )
model.eval()
a =floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
a =model(_lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ ( self ):
a =self.prepare_config_and_inputs()
a , a , a =config_and_inputs
a ={"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class UpperCAmelCase__ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_SCREAMING_SNAKE_CASE : int = (ViTMSNModel, ViTMSNForImageClassification) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : str = (
{"feature-extraction": ViTMSNModel, "image-classification": ViTMSNForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : Tuple = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : List[str] = False
def lowerCAmelCase__ ( self ):
a =ViTMSNModelTester(self )
a =ConfigTester(self , config_class=_lowerCAmelCase , has_text_modality=_lowerCAmelCase , hidden_size=37 )
def lowerCAmelCase__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMSN does not use inputs_embeds""" )
def lowerCAmelCase__ ( self ):
pass
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(_lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
a =model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_lowerCAmelCase , nn.Linear ) )
def lowerCAmelCase__ ( self ):
a , a =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
a =model_class(_lowerCAmelCase )
a =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
a =[*signature.parameters.keys()]
a =["""pixel_values"""]
self.assertListEqual(arg_names[:1] , _lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_lowerCAmelCase )
def lowerCAmelCase__ ( self ):
a =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_lowerCAmelCase )
@slow
def lowerCAmelCase__ ( self ):
for model_name in VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
a =ViTMSNModel.from_pretrained(_lowerCAmelCase )
self.assertIsNotNone(_lowerCAmelCase )
def lowerCamelCase ( )-> Dict:
"""simple docstring"""
a =Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class UpperCAmelCase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ ( self ):
return ViTImageProcessor.from_pretrained("""facebook/vit-msn-small""" ) if is_vision_available() else None
@slow
def lowerCAmelCase__ ( self ):
torch.manual_seed(2 )
a =ViTMSNForImageClassification.from_pretrained("""facebook/vit-msn-small""" ).to(_lowerCAmelCase )
a =self.default_image_processor
a =prepare_img()
a =image_processor(images=_lowerCAmelCase , return_tensors="""pt""" ).to(_lowerCAmelCase )
# forward pass
with torch.no_grad():
a =model(**_lowerCAmelCase )
# verify the logits
a =torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , _lowerCAmelCase )
a =torch.tensor([-0.08_03, -0.44_54, -0.23_75] ).to(_lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _lowerCAmelCase , atol=1E-4 ) )
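# Minimal inference sketch mirroring the slow test above (network access and
# the public "facebook/vit-msn-small" checkpoint assumed):
#   processor = ViTImageProcessor.from_pretrained("facebook/vit-msn-small")
#   model = ViTMSNForImageClassification.from_pretrained("facebook/vit-msn-small")
#   inputs = processor(images=image, return_tensors="pt")
#   logits = model(**inputs).logits        # shape (1, 1000)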
| 321 | 0 |
import argparse
import json
import os
from collections import OrderedDict
import numpy as np
import tensorflow as tf
import torch
def lowerCamelCase_ ( UpperCAmelCase__ ):
"""simple docstring"""
a_ = os.path.join(args.tf_model_dir , """parameters.json""" )
a_ = json.loads(open(UpperCAmelCase__ ).read() )
if not params:
raise ValueError(
F"It seems that the json file at {parameter_file} is empty. Make sure you have a correct json file." )
if not args.output.endswith(""".pt""" ):
a_ = args.output + """.pt"""
a_ = OrderedDict()
with tf.device("""/CPU:0""" ):
a_ = tf.train.load_checkpoint(args.tf_model_dir )
a_ = reader.get_variable_to_shape_map()
for key_name in shapes.keys():
a_ = reader.get_tensor(UpperCAmelCase__ ).astype(np.floataa )
if key_name.endswith("""/adam_m""" ) or key_name.endswith("""/adam_v""" ):
continue
if key_name.startswith("""pasts/""" ):
if key_name.startswith("""pasts/mlp""" ):
a_ = int(key_name[9] )
elif key_name.startswith("""pasts/out""" ):
a_ = 8
a_ = """model.sqout.%d.weight""" % (player * 2) # enter to nn.Sequencial with Tanh, so 2 at a time
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/moe""" ):
a_ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/switch_gating/kernel""" ):
a_ = """model.blocks.%d.feed_forward.mlp.router.classifier.weight""" % player
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/softmlp/kernel""" ):
a_ = """model.blocks.%d.feed_forward.soft_bypass_mlp.weight""" % player
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/wo/kernel""" ) or key_name.endswith("""/wi/kernel""" ):
a_ = key_name[-9:-7]
for i in range(16 ):
a_ = """model.blocks.%d.feed_forward.mlp.experts.expert_%d.%s.weight""" % (player, i, nlayer)
a_ = (
vnp[i].transpose([1, 0] ).copy()
) # In Mesh-Tensorflow, it is one array, so it is divided
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/mlp""" ):
a_ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/p1/kernel""" ):
a_ = """model.blocks.%d.feed_forward.mlp.wi.weight""" % player
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p1/bias""" ):
a_ = """model.blocks.%d.feed_forward.mlp.wi.bias""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p2/kernel""" ):
a_ = """model.blocks.%d.feed_forward.mlp.wo.weight""" % player
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/p2/bias""" ):
a_ = """model.blocks.%d.feed_forward.mlp.wo.bias""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/ln""" ):
a_ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
a_ = """model.blocks.%d.feed_forward.norm.bias""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/g""" ):
a_ = """model.blocks.%d.feed_forward.norm.weight""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/att""" ):
a_ = int(key_name[9:].split("""/""" )[0] )
if key_name.endswith("""/qkv/kernel""" ):
a_ = vnp.copy() # Compute same dimension as Mesh-tensorflow using einsum
a_ = state[:, 0, :, :]
a_ = state[:, 1, :, :]
a_ = state[:, 2, :, :]
a_ = (
state_q.reshape([state_q.shape[0], state_q.shape[1] * state_q.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ = (
state_k.reshape([state_k.shape[0], state_k.shape[1] * state_k.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ = (
state_v.reshape([state_v.shape[0], state_v.shape[1] * state_v.shape[2]] )
.transpose([1, 0] )
.copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ = """model.blocks.%d.self_attn.self_attn.q_proj.weight""" % player
a_ = torch.tensor(UpperCAmelCase__ )
a_ = """model.blocks.%d.self_attn.self_attn.k_proj.weight""" % player
a_ = torch.tensor(UpperCAmelCase__ )
a_ = """model.blocks.%d.self_attn.self_attn.v_proj.weight""" % player
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/o/kernel""" ):
a_ = """model.blocks.%d.self_attn.self_attn.out_proj.weight""" % player
a_ = (
vnp.reshape([vnp.shape[0] * vnp.shape[1], vnp.shape[2]] ).transpose([1, 0] ).copy()
) # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/an""" ):
a_ = int(key_name[8:].split("""/""" )[0] )
if key_name.endswith("""/b""" ):
a_ = """model.blocks.%d.self_attn.norm.bias""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.endswith("""/g""" ):
a_ = """model.blocks.%d.self_attn.norm.weight""" % player
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
elif (
key_name.startswith("""model/wte""" )
or key_name.startswith("""model/wpe""" )
or key_name.startswith("""model/ete""" )
):
a_ = {"""wte""": """embed_tokens""", """wpe""": """position_embeddings""", """ete""": """extra_position_embeddings"""}[
key_name[-3:]
]
a_ = """model.%s.weight""" % nlayer
a_ = vnp.copy() # same in embedded
a_ = torch.tensor(UpperCAmelCase__ )
if key_name.startswith("""model/wte""" ):
a_ = """lm_head.weight"""
a_ = vnp.copy() # same in embedded
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name.startswith("""model/wob""" ):
a_ = """final_logits_bias"""
a_ = vnp.copy() # same in embedded
a_ = state.reshape((1, -1) )
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name == "model/dense/kernel":
a_ = """model.last_project.weight"""
a_ = vnp.transpose([1, 0] ).copy() # Mesh-Tensorflow is a diagonal matrix
a_ = torch.tensor(UpperCAmelCase__ )
elif key_name == "model/dense_1/bias":
a_ = """model.last_project.bias"""
a_ = vnp.copy() # same because it is one dimensional
a_ = torch.tensor(UpperCAmelCase__ )
torch.save(UpperCAmelCase__ , args.output )
if __name__ == "__main__":
A_ : Union[str, Any] =argparse.ArgumentParser(
description="""model converter.""", formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument("""--tf_model_dir""", metavar="""PATH""", type=str, required=True, help="""import model""")
parser.add_argument("""--output""", metavar="""PATH""", type=str, required=True, help="""output model""")
A_ : int =parser.parse_args()
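    # Example invocation of this converter (paths are placeholders):
    #   python this_script.py --tf_model_dir /path/to/tf_checkpoint --output gptsan.pt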
    convert_tf_gptsan_to_pt(args)
| 483 |
from dataclasses import asdict, dataclass
from typing import Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
A_ : Any =logging.get_logger(__name__)
# TODO Update this
A_ : List[str] ={
"""facebook/esm-1b""": """https://huggingface.co/facebook/esm-1b/resolve/main/config.json""",
# See all ESM models at https://huggingface.co/models?filter=esm
}
class lowercase_ ( UpperCamelCase__):
"""simple docstring"""
snake_case_ = '''esm'''
def __init__( self , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=None , _UpperCAmelCase=768 , _UpperCAmelCase=12 , _UpperCAmelCase=12 , _UpperCAmelCase=3_072 , _UpperCAmelCase=0.1 , _UpperCAmelCase=0.1 , _UpperCAmelCase=1_026 , _UpperCAmelCase=0.0_2 , _UpperCAmelCase=1e-1_2 , _UpperCAmelCase="absolute" , _UpperCAmelCase=True , _UpperCAmelCase=None , _UpperCAmelCase=False , _UpperCAmelCase=False , _UpperCAmelCase=None , _UpperCAmelCase=None , **_UpperCAmelCase , ):
"""simple docstring"""
super().__init__(pad_token_id=_UpperCAmelCase , mask_token_id=_UpperCAmelCase , **_UpperCAmelCase )
a_ = vocab_size
a_ = hidden_size
a_ = num_hidden_layers
a_ = num_attention_heads
a_ = intermediate_size
a_ = hidden_dropout_prob
a_ = attention_probs_dropout_prob
a_ = max_position_embeddings
a_ = initializer_range
a_ = layer_norm_eps
a_ = position_embedding_type
a_ = use_cache
a_ = emb_layer_norm_before
a_ = token_dropout
a_ = is_folding_model
if is_folding_model:
if esmfold_config is None:
logger.info("""No esmfold_config supplied for folding model, using default values.""" )
a_ = EsmFoldConfig()
elif isinstance(_UpperCAmelCase , _UpperCAmelCase ):
a_ = EsmFoldConfig(**_UpperCAmelCase )
a_ = esmfold_config
if vocab_list is None:
logger.warning("""No vocab_list supplied for folding model, assuming the ESM-2 vocabulary!""" )
a_ = get_default_vocab_list()
else:
a_ = vocab_list
else:
a_ = None
a_ = None
if self.esmfold_config is not None and getattr(self.esmfold_config , """use_esm_attn_map""" , False ):
raise ValueError("""The HuggingFace port of ESMFold does not support use_esm_attn_map at this time!""" )
def lowercase__ ( self ):
"""simple docstring"""
a_ = super().to_dict()
if isinstance(self.esmfold_config , EsmFoldConfig ):
a_ = self.esmfold_config.to_dict()
return output
@dataclass
class lowercase_ :
"""simple docstring"""
snake_case_ = None
snake_case_ = True
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = 0
snake_case_ = True
snake_case_ = False
snake_case_ = 1_28
snake_case_ = None
def lowercase__ ( self ):
"""simple docstring"""
if self.trunk is None:
a_ = TrunkConfig()
elif isinstance(self.trunk , dict ):
a_ = TrunkConfig(**self.trunk )
def lowercase__ ( self ):
"""simple docstring"""
a_ = asdict(self )
a_ = self.trunk.to_dict()
return output
@dataclass
class lowercase_ :
"""simple docstring"""
snake_case_ = 48
snake_case_ = 10_24
snake_case_ = 1_28
snake_case_ = 32
snake_case_ = 32
snake_case_ = 32
snake_case_ = 0
snake_case_ = 0
snake_case_ = False
snake_case_ = 4
snake_case_ = 1_28
snake_case_ = None
def lowercase__ ( self ):
"""simple docstring"""
if self.structure_module is None:
a_ = StructureModuleConfig()
elif isinstance(self.structure_module , dict ):
a_ = StructureModuleConfig(**self.structure_module )
if self.max_recycles <= 0:
raise ValueError(f"`max_recycles` should be positive, got {self.max_recycles}." )
if self.sequence_state_dim % self.sequence_head_width != 0:
raise ValueError(
"""`sequence_state_dim` should be a round multiple of `sequence_head_width`, got"""
f" {self.sequence_state_dim} and {self.sequence_head_width}." )
if self.pairwise_state_dim % self.pairwise_head_width != 0:
raise ValueError(
"""`pairwise_state_dim` should be a round multiple of `pairwise_head_width`, got"""
f" {self.pairwise_state_dim} and {self.pairwise_head_width}." )
a_ = self.sequence_state_dim // self.sequence_head_width
a_ = self.pairwise_state_dim // self.pairwise_head_width
if self.sequence_state_dim != sequence_num_heads * self.sequence_head_width:
raise ValueError(
"""`sequence_state_dim` should be equal to `sequence_num_heads * sequence_head_width, got"""
f" {self.sequence_state_dim} != {sequence_num_heads} * {self.sequence_head_width}." )
if self.pairwise_state_dim != pairwise_num_heads * self.pairwise_head_width:
raise ValueError(
"""`pairwise_state_dim` should be equal to `pairwise_num_heads * pairwise_head_width, got"""
f" {self.pairwise_state_dim} != {pairwise_num_heads} * {self.pairwise_head_width}." )
if self.pairwise_state_dim % 2 != 0:
raise ValueError(f"`pairwise_state_dim` should be even, got {self.pairwise_state_dim}." )
if self.dropout >= 0.4:
raise ValueError(f"`dropout` should not be greater than 0.4, got {self.dropout}." )
def lowercase__ ( self ):
"""simple docstring"""
a_ = asdict(self )
a_ = self.structure_module.to_dict()
return output
@dataclass
class lowercase_ :
"""simple docstring"""
snake_case_ = 3_84
snake_case_ = 1_28
snake_case_ = 16
snake_case_ = 1_28
snake_case_ = 12
snake_case_ = 4
snake_case_ = 8
snake_case_ = 0.1
snake_case_ = 8
snake_case_ = 1
snake_case_ = 2
snake_case_ = 7
snake_case_ = 10
snake_case_ = 1E-8
snake_case_ = 1E5
def lowercase__ ( self ):
"""simple docstring"""
return asdict(self )
def lowerCamelCase_ ( ):
"""simple docstring"""
return (
"<cls>",
"<pad>",
"<eos>",
"<unk>",
"L",
"A",
"G",
"V",
"S",
"E",
"R",
"T",
"I",
"D",
"P",
"K",
"Q",
"N",
"F",
"Y",
"M",
"H",
"W",
"C",
"X",
"B",
"U",
"Z",
"O",
".",
"-",
"<null_1>",
"<mask>",
) | 483 | 1 |
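The EsmFoldConfig/TrunkConfig/StructureModuleConfig classes above all use the same nested-config hydration pattern: a sub-config field may arrive as None or as a plain dict and is normalized into a dataclass instance, and to_dict recursively serializes it back. A minimal self-contained sketch of that pattern (field names here are hypothetical, not ESM's):

from dataclasses import asdict, dataclass
from typing import Optional, Union

@dataclass
class Inner:
    width: int = 128

@dataclass
class Outer:
    inner: Optional[Union[Inner, dict]] = None

    def __post_init__(self):
        # normalize: None -> defaults, plain dict -> dataclass instance
        if self.inner is None:
            self.inner = Inner()
        elif isinstance(self.inner, dict):
            self.inner = Inner(**self.inner)

    def to_dict(self):
        return asdict(self)

assert Outer(inner={"width": 64}).inner.width == 64
assert Outer().to_dict() == {"inner": {"width": 128}}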
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
__snake_case = True
except (ImportError, ModuleNotFoundError):
__snake_case = False
if NLTK_AVAILABLE:
with FileLock(""".lock""") as lock:
nltk.download("""punkt""", quiet=True)
def __lowerCAmelCase ( lowercase : str ) -> str:
"""simple docstring"""
re.sub("<n>" , "" , lowercase ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(lowercase ) )
| 117 |
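A cleaned-up, self-contained version of the sentence splitter above (in upstream transformers examples this helper is, to the best of my knowledge, named add_newline_to_end_of_each_sentence and exists to make rougeLsum match published rougeL scores for BART/Pegasus); requires nltk:

import re
import nltk

nltk.download("punkt", quiet=True)

def add_newline_to_end_of_each_sentence(text: str) -> str:
    text = re.sub("<n>", "", text)  # remove the Pegasus newline token
    return "\n".join(nltk.sent_tokenize(text))

print(add_newline_to_end_of_each_sentence("One sentence. Another one.<n>A third."))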
"""simple docstring"""
from __future__ import annotations
__snake_case = list[tuple[int, int]]
__snake_case = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__snake_case = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , ) -> str:
'''simple docstring'''
snake_case : int = pos_x
snake_case : List[str] = pos_y
snake_case : List[Any] = (pos_y, pos_x)
snake_case : Optional[int] = goal_x
snake_case : Dict = goal_y
snake_case : Any = g_cost
snake_case : List[Any] = parent
snake_case : Union[str, Any] = self.calculate_heuristic()
def lowerCamelCase ( self ) -> float:
'''simple docstring'''
snake_case : Optional[Any] = abs(self.pos_x - self.goal_x )
snake_case : Dict = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , UpperCamelCase__ ) -> bool:
'''simple docstring'''
return self.f_cost < other.f_cost
class _lowerCAmelCase :
def __init__( self , UpperCamelCase__ , UpperCamelCase__ ) -> List[Any]:
'''simple docstring'''
snake_case : int = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , UpperCamelCase__ )
snake_case : List[Any] = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9999 , UpperCamelCase__ )
snake_case : Tuple = [self.start]
snake_case : list[Node] = []
snake_case : Dict = False
def lowerCamelCase ( self ) -> Path | None:
'''simple docstring'''
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case : str = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
snake_case : Tuple = True
return self.retrace_path(UpperCamelCase__ )
self.closed_nodes.append(UpperCamelCase__ )
snake_case : Optional[Any] = self.get_successors(UpperCamelCase__ )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(UpperCamelCase__ )
else:
# retrieve the best current path
snake_case : Dict = self.open_nodes.pop(self.open_nodes.index(UpperCamelCase__ ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(UpperCamelCase__ )
else:
self.open_nodes.append(UpperCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def lowerCamelCase ( self , UpperCamelCase__ ) -> list[Node]:
'''simple docstring'''
snake_case : Dict = []
for action in delta:
snake_case : Union[str, Any] = parent.pos_x + action[1]
snake_case : str = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
UpperCamelCase__ , UpperCamelCase__ , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , UpperCamelCase__ , ) )
return successors
def lowerCamelCase ( self , UpperCamelCase__ ) -> Path:
'''simple docstring'''
snake_case : Optional[int] = node
snake_case : int = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case : Any = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__snake_case = (0, 0)
__snake_case = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
__snake_case = GreedyBestFirst(init, goal)
__snake_case = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__snake_case = 2
for elem in grid:
print(elem)
| 117 | 1 |
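A quick standalone check (not part of the file above) of the Manhattan-distance heuristic that Node.calculate_heuristic implements; greedy best-first orders its open list by this value alone, which is what distinguishes it from A*, which would sort by g_cost + heuristic:

def manhattan(pos_x: int, pos_y: int, goal_x: int, goal_y: int) -> int:
    return abs(pos_x - goal_x) + abs(pos_y - goal_y)

# start (0, 0) to goal (6, 6) on the 7x7 grid above
assert manhattan(0, 0, 6, 6) == 12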
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class __UpperCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def snake_case_ ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def snake_case_ ( self ):
__a = StableDiffusionKDiffusionPipeline.from_pretrained("""CompVis/stable-diffusion-v1-4""" )
__a = sd_pipe.to(torch_device )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("""sample_euler""" )
__a = '''A painting of a squirrel eating a burger'''
__a = torch.manual_seed(0 )
__a = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
__a = output.images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.0447, 0.0492, 0.0468, 0.0408, 0.0383, 0.0408, 0.0354, 0.0380, 0.0339] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def snake_case_ ( self ):
__a = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__a = sd_pipe.to(torch_device )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("""sample_euler""" )
__a = '''A painting of a squirrel eating a burger'''
__a = torch.manual_seed(0 )
__a = sd_pipe([prompt] , generator=_A , guidance_scale=9.0 , num_inference_steps=20 , output_type="""np""" )
__a = output.images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array([0.1237, 0.1320, 0.1438, 0.1359, 0.1390, 0.1132, 0.1277, 0.1175, 0.1112] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-1
def snake_case_ ( self ):
__a = StableDiffusionKDiffusionPipeline.from_pretrained("""stabilityai/stable-diffusion-2-1-base""" )
__a = sd_pipe.to(torch_device )
sd_pipe.set_progress_bar_config(disable=_A )
sd_pipe.set_scheduler("""sample_dpmpp_2m""" )
__a = '''A painting of a squirrel eating a burger'''
__a = torch.manual_seed(0 )
__a = sd_pipe(
[prompt] , generator=_A , guidance_scale=7.5 , num_inference_steps=15 , output_type="""np""" , use_karras_sigmas=_A , )
__a = output.images
__a = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
__a = np.array(
[0.11381689, 0.12112921, 0.1389457, 0.12549606, 0.1244964, 0.10831517, 0.11562866, 0.10867816, 0.10499048] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
| 99 |
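Outside the test harness, the same pipeline can be driven directly; a hedged sketch mirroring the first test above (needs a CUDA GPU plus the model weights, and the output path is a hypothetical placeholder):

import torch
from diffusers import StableDiffusionKDiffusionPipeline

pipe = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
pipe.set_scheduler("sample_euler")
image = pipe(
    "A painting of a squirrel eating a burger",
    generator=torch.manual_seed(0),
    guidance_scale=9.0,
    num_inference_steps=20,
).images[0]
image.save("squirrel.png")  # hypothetical output path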
'''simple docstring'''
from __future__ import annotations
import copy
import inspect
import unittest
import numpy as np
from transformers import is_tf_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_tf, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
LayoutLMvaConfig,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
TFLayoutLMvaModel,
)
if is_vision_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class lowerCamelCase_ :
def __init__( self : Optional[int] , _A : Optional[Any] , _A : Tuple=2 , _A : Tuple=3 , _A : Optional[Any]=4 , _A : List[Any]=2 , _A : List[Any]=7 , _A : int=True , _A : Dict=True , _A : int=True , _A : Dict=True , _A : Tuple=99 , _A : Union[str, Any]=36 , _A : int=2 , _A : List[str]=4 , _A : int=37 , _A : List[Any]="gelu" , _A : str=0.1 , _A : str=0.1 , _A : Tuple=512 , _A : Dict=16 , _A : Tuple=2 , _A : Union[str, Any]=0.0_2 , _A : Any=6 , _A : Union[str, Any]=6 , _A : str=3 , _A : str=4 , _A : Tuple=None , _A : int=1_000 , ):
'''simple docstring'''
UpperCAmelCase__ : int = parent
UpperCAmelCase__ : Optional[int] = batch_size
UpperCAmelCase__ : str = num_channels
UpperCAmelCase__ : str = image_size
UpperCAmelCase__ : List[str] = patch_size
UpperCAmelCase__ : Any = is_training
UpperCAmelCase__ : List[str] = use_input_mask
UpperCAmelCase__ : Tuple = use_token_type_ids
UpperCAmelCase__ : str = use_labels
UpperCAmelCase__ : int = vocab_size
UpperCAmelCase__ : List[Any] = hidden_size
UpperCAmelCase__ : Optional[int] = num_hidden_layers
UpperCAmelCase__ : List[str] = num_attention_heads
UpperCAmelCase__ : Tuple = intermediate_size
UpperCAmelCase__ : Dict = hidden_act
UpperCAmelCase__ : int = hidden_dropout_prob
UpperCAmelCase__ : Optional[int] = attention_probs_dropout_prob
UpperCAmelCase__ : List[str] = max_position_embeddings
UpperCAmelCase__ : Tuple = type_vocab_size
UpperCAmelCase__ : Any = type_sequence_label_size
UpperCAmelCase__ : List[str] = initializer_range
UpperCAmelCase__ : List[str] = coordinate_size
UpperCAmelCase__ : Tuple = shape_size
UpperCAmelCase__ : Optional[int] = num_labels
UpperCAmelCase__ : Optional[Any] = num_choices
UpperCAmelCase__ : Union[str, Any] = scope
UpperCAmelCase__ : Optional[Any] = range_bbox
# LayoutLMv3's sequence length equals the number of text tokens + number of patches + 1 (we add 1 for the CLS token)
UpperCAmelCase__ : str = text_seq_length
UpperCAmelCase__ : Tuple = (image_size // patch_size) ** 2 + 1
UpperCAmelCase__ : Tuple = self.text_seq_length + self.image_seq_length
def lowercase_ ( self : Union[str, Any] ):
'''simple docstring'''
UpperCAmelCase__ : Dict = ids_tensor([self.batch_size, self.text_seq_length] , self.vocab_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length, 4] , self.range_bbox )
UpperCAmelCase__ : int = bbox.numpy()
# Ensure that bbox is legal
for i in range(bbox.shape[0] ):
for j in range(bbox.shape[1] ):
if bbox[i, j, 3] < bbox[i, j, 1]:
UpperCAmelCase__ : str = bbox[i, j, 3]
UpperCAmelCase__ : Dict = bbox[i, j, 1]
UpperCAmelCase__ : str = tmp_coordinate
if bbox[i, j, 2] < bbox[i, j, 0]:
UpperCAmelCase__ : Optional[int] = bbox[i, j, 2]
UpperCAmelCase__ : Any = bbox[i, j, 0]
UpperCAmelCase__ : List[Any] = tmp_coordinate
UpperCAmelCase__ : str = tf.constant(_A )
UpperCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase__ : Any = None
if self.use_input_mask:
UpperCAmelCase__ : Any = random_attention_mask([self.batch_size, self.text_seq_length] )
UpperCAmelCase__ : Any = None
if self.use_token_type_ids:
UpperCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.text_seq_length] , self.type_vocab_size )
UpperCAmelCase__ : Optional[int] = None
UpperCAmelCase__ : List[str] = None
if self.use_labels:
UpperCAmelCase__ : Any = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase__ : List[Any] = ids_tensor([self.batch_size, self.text_seq_length] , self.num_labels )
UpperCAmelCase__ : Optional[int] = LayoutLMvaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , coordinate_size=self.coordinate_size , shape_size=self.shape_size , input_size=self.image_size , patch_size=self.patch_size , )
return config, input_ids, bbox, pixel_values, token_type_ids, input_mask, sequence_labels, token_labels
def lowercase_ ( self : Union[str, Any] , _A : int , _A : str , _A : Optional[int] , _A : Optional[int] , _A : List[str] , _A : List[Any] ):
'''simple docstring'''
UpperCAmelCase__ : int = TFLayoutLMvaModel(config=_A )
# text + image
UpperCAmelCase__ : Tuple = model(_A , pixel_values=_A , training=_A )
UpperCAmelCase__ : Tuple = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , training=_A , )
UpperCAmelCase__ : Optional[Any] = model(_A , bbox=_A , pixel_values=_A , training=_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# text only
UpperCAmelCase__ : Any = model(_A , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.text_seq_length, self.hidden_size) )
# image only
UpperCAmelCase__ : str = model({'''pixel_values''': pixel_values} , training=_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.image_seq_length, self.hidden_size) )
def lowercase_ ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[Any] , _A : Dict , _A : List[Any] , _A : List[Any] , _A : Any , _A : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : Optional[int] = self.num_labels
UpperCAmelCase__ : int = TFLayoutLMvaForSequenceClassification(config=_A )
UpperCAmelCase__ : Union[str, Any] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowercase_ ( self : Dict , _A : List[Any] , _A : Any , _A : Dict , _A : str , _A : Optional[int] , _A : str , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = self.num_labels
UpperCAmelCase__ : Union[str, Any] = TFLayoutLMvaForTokenClassification(config=_A )
UpperCAmelCase__ : Optional[int] = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , labels=_A , training=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.text_seq_length, self.num_labels) )
def lowercase_ ( self : Dict , _A : Dict , _A : List[str] , _A : Union[str, Any] , _A : int , _A : Tuple , _A : Dict , _A : str ):
'''simple docstring'''
UpperCAmelCase__ : str = 2
UpperCAmelCase__ : Dict = TFLayoutLMvaForQuestionAnswering(config=_A )
UpperCAmelCase__ : str = model(
_A , bbox=_A , pixel_values=_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , training=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
UpperCAmelCase__ : int = self.prepare_config_and_inputs()
((UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__) , (UpperCAmelCase__)) : List[str] = config_and_inputs
UpperCAmelCase__ : List[Any] = {
'''input_ids''': input_ids,
'''bbox''': bbox,
'''pixel_values''': pixel_values,
'''token_type_ids''': token_type_ids,
'''attention_mask''': input_mask,
}
return config, inputs_dict
@require_tf
class lowerCamelCase_ ( __a , __a , unittest.TestCase ):
lowerCAmelCase__ = (
(
TFLayoutLMvaModel,
TFLayoutLMvaForQuestionAnswering,
TFLayoutLMvaForSequenceClassification,
TFLayoutLMvaForTokenClassification,
)
if is_tf_available()
else ()
)
lowerCAmelCase__ = (
{'document-question-answering': TFLayoutLMvaForQuestionAnswering, 'feature-extraction': TFLayoutLMvaModel}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def lowercase_ ( self : List[Any] , _A : Union[str, Any] , _A : str , _A : List[Any] , _A : Dict , _A : List[str] ):
'''simple docstring'''
return True
def lowercase_ ( self : Optional[Any] , _A : Tuple , _A : Any , _A : Dict=False ):
'''simple docstring'''
UpperCAmelCase__ : List[Any] = copy.deepcopy(_A )
if model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = {
k: tf.tile(tf.expand_dims(_A , 1 ) , (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1) )
if isinstance(_A , tf.Tensor ) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.ones(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Tuple = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : Dict = tf.zeros(self.model_tester.batch_size , dtype=tf.int32 )
elif model_class in get_values(_A ):
UpperCAmelCase__ : int = tf.zeros(
(self.model_tester.batch_size, self.model_tester.text_seq_length) , dtype=tf.int32 )
return inputs_dict
def lowercase_ ( self : List[str] ):
'''simple docstring'''
UpperCAmelCase__ : Any = TFLayoutLMvaModelTester(self )
UpperCAmelCase__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def lowercase_ ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
UpperCAmelCase__ , UpperCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase__ : Optional[Any] = model_class(_A )
if getattr(_A , '''hf_compute_loss''' , _A ):
# The number of elements in the loss should be the same as the number of elements in the label
UpperCAmelCase__ : Tuple = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : List[Any] = prepared_for_class[
sorted(prepared_for_class.keys() - inputs_dict.keys() , reverse=_A )[0]
]
UpperCAmelCase__ : Optional[Any] = added_label.shape.as_list()[:1]
# Test that model correctly compute the loss with kwargs
UpperCAmelCase__ : Any = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
UpperCAmelCase__ : List[Any] = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss when we mask some positions
UpperCAmelCase__ : Union[str, Any] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Tuple = prepared_for_class.pop('''input_ids''' )
if "labels" in prepared_for_class:
UpperCAmelCase__ : Optional[Any] = prepared_for_class['''labels'''].numpy()
if len(labels.shape ) > 1 and labels.shape[1] != 1:
UpperCAmelCase__ : Any = -100
UpperCAmelCase__ : Union[str, Any] = tf.convert_to_tensor(_A )
UpperCAmelCase__ : int = model(_A , **_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
self.assertTrue(not np.any(np.isnan(loss.numpy() ) ) )
# Test that model correctly compute the loss with a dict
UpperCAmelCase__ : Optional[int] = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
UpperCAmelCase__ : Dict = model(_A )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
# Test that model correctly compute the loss with a tuple
UpperCAmelCase__ : Dict = self._prepare_for_class(inputs_dict.copy() , _A , return_labels=_A )
# Get keys that were added with the _prepare_for_class function
UpperCAmelCase__ : Optional[int] = prepared_for_class.keys() - inputs_dict.keys()
UpperCAmelCase__ : int = inspect.signature(model.call ).parameters
UpperCAmelCase__ : Union[str, Any] = list(signature.keys() )
# Create a dictionary holding the location of the tensors in the tuple
UpperCAmelCase__ : Dict = {0: '''input_ids'''}
for label_key in label_keys:
UpperCAmelCase__ : str = signature_names.index(_A )
UpperCAmelCase__ : List[Any] = label_key
UpperCAmelCase__ : Dict = sorted(tuple_index_mapping.items() )
# Initialize a list with their default values, update the values and convert to a tuple
UpperCAmelCase__ : Tuple = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default )
for index, value in sorted_tuple_index_mapping:
UpperCAmelCase__ : Any = prepared_for_class[value]
UpperCAmelCase__ : Tuple = tuple(_A )
# Send to model
UpperCAmelCase__ : Optional[Any] = model(tuple_input[:-1] )[0]
self.assertTrue(loss.shape.as_list() == expected_loss_size or loss.shape.as_list() == [1] )
def lowercase_ ( self : int ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Tuple ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : int = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
UpperCAmelCase__ : Union[str, Any] = type
self.model_tester.create_and_check_model(_A , _A , _A , _A , _A , _A )
def lowercase_ ( self : List[str] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Any ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(
_A , _A , _A , _A , _A , _A , _A )
def lowercase_ ( self : Optional[int] ):
'''simple docstring'''
(
(
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) , (
UpperCAmelCase__
) ,
) : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(
_A , _A , _A , _A , _A , _A , _A )
@slow
def lowercase_ ( self : List[Any] ):
'''simple docstring'''
for model_name in TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase__ : List[str] = TFLayoutLMvaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def a__ ( ) -> List[str]:
UpperCAmelCase__ : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
return image
@require_tf
class lowerCamelCase_ ( unittest.TestCase ):
@cached_property
def lowercase_ ( self : Dict ):
'''simple docstring'''
return LayoutLMvaImageProcessor(apply_ocr=False ) if is_vision_available() else None
@slow
def lowercase_ ( self : int ):
'''simple docstring'''
UpperCAmelCase__ : str = TFLayoutLMvaModel.from_pretrained('''microsoft/layoutlmv3-base''' )
UpperCAmelCase__ : Dict = self.default_image_processor
UpperCAmelCase__ : Any = prepare_img()
UpperCAmelCase__ : int = image_processor(images=_A , return_tensors='''tf''' ).pixel_values
UpperCAmelCase__ : str = tf.constant([[1, 2]] )
UpperCAmelCase__ : Optional[Any] = tf.expand_dims(tf.constant([[1, 2, 3, 4], [5, 6, 7, 8]] ) , axis=0 )
# forward pass
UpperCAmelCase__ : int = model(input_ids=_A , bbox=_A , pixel_values=_A , training=_A )
# verify the logits
UpperCAmelCase__ : Optional[int] = (1, 199, 768)
self.assertEqual(outputs.last_hidden_state.shape , _A )
UpperCAmelCase__ : Dict = tf.constant(
[[-0.0_5_2_9, 0.3_6_1_8, 0.1_6_3_2], [-0.1_5_8_7, -0.1_6_6_7, -0.0_4_0_0], [-0.1_5_5_7, -0.1_6_7_1, -0.0_5_0_5]] )
self.assertTrue(np.allclose(outputs.last_hidden_state[0, :3, :3] , _A , atol=1e-4 ) )
| 75 | 0 |
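The per-element loop in prepare_config_and_inputs above that legalizes bounding boxes (ensuring x0 <= x1 and y0 <= y1 by swapping out-of-order coordinates) can be expressed vectorized; a small numpy sketch of the equivalent operation:

import numpy as np

bbox = np.random.randint(0, 1000, size=(2, 7, 4))  # (batch, seq, [x0, y0, x1, y1])
bbox[..., [1, 3]] = np.sort(bbox[..., [1, 3]], axis=-1)  # enforce y0 <= y1
bbox[..., [0, 2]] = np.sort(bbox[..., [0, 2]], axis=-1)  # enforce x0 <= x1
assert (bbox[..., 3] >= bbox[..., 1]).all() and (bbox[..., 2] >= bbox[..., 0]).all()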
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__a : int = logging.get_logger(__name__)
__a : Tuple = {
"microsoft/wavlm-base": "https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class __lowercase ( lowercase_ ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = "wavlm"
def __init__( self : Optional[int] , UpperCamelCase_ : Optional[int]=32 , UpperCamelCase_ : int=768 , UpperCamelCase_ : Any=12 , UpperCamelCase_ : List[Any]=12 , UpperCamelCase_ : int=3_072 , UpperCamelCase_ : Optional[int]="gelu" , UpperCamelCase_ : List[str]=0.1 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : str=0.1 , UpperCamelCase_ : Optional[int]=0.0 , UpperCamelCase_ : Tuple=0.1 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Optional[int]=0.02 , UpperCamelCase_ : str=1e-5 , UpperCamelCase_ : str="group" , UpperCamelCase_ : str="gelu" , UpperCamelCase_ : Dict=(512, 512, 512, 512, 512, 512, 512) , UpperCamelCase_ : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , UpperCamelCase_ : Dict=(10, 3, 3, 3, 3, 2, 2) , UpperCamelCase_ : str=False , UpperCamelCase_ : int=128 , UpperCamelCase_ : List[Any]=16 , UpperCamelCase_ : List[Any]=320 , UpperCamelCase_ : List[Any]=800 , UpperCamelCase_ : str=False , UpperCamelCase_ : Dict=True , UpperCamelCase_ : Union[str, Any]=0.05 , UpperCamelCase_ : List[str]=10 , UpperCamelCase_ : Tuple=2 , UpperCamelCase_ : Any=0.0 , UpperCamelCase_ : Optional[int]=10 , UpperCamelCase_ : Union[str, Any]=320 , UpperCamelCase_ : Union[str, Any]=2 , UpperCamelCase_ : Any=0.1 , UpperCamelCase_ : Optional[int]=100 , UpperCamelCase_ : Union[str, Any]=256 , UpperCamelCase_ : Tuple=256 , UpperCamelCase_ : Optional[Any]=0.1 , UpperCamelCase_ : Tuple="mean" , UpperCamelCase_ : Any=False , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int=256 , UpperCamelCase_ : Dict=(512, 512, 512, 512, 1_500) , UpperCamelCase_ : Optional[Any]=(5, 3, 3, 1, 1) , UpperCamelCase_ : str=(1, 2, 3, 1, 1) , UpperCamelCase_ : str=512 , UpperCamelCase_ : List[str]=80 , UpperCamelCase_ : Optional[Any]=0 , UpperCamelCase_ : List[Any]=1 , UpperCamelCase_ : str=2 , UpperCamelCase_ : Any=False , UpperCamelCase_ : str=3 , UpperCamelCase_ : Optional[int]=2 , UpperCamelCase_ : Dict=3 , UpperCamelCase_ : Union[str, Any]=None , **UpperCamelCase_ : int , ):
"""simple docstring"""
super().__init__(**UpperCamelCase_ , pad_token_id=UpperCamelCase_ , bos_token_id=UpperCamelCase_ , eos_token_id=UpperCamelCase_ )
__A = hidden_size
__A = feat_extract_norm
__A = feat_extract_activation
__A = list(UpperCamelCase_ )
__A = list(UpperCamelCase_ )
__A = list(UpperCamelCase_ )
__A = conv_bias
__A = num_buckets
__A = max_bucket_distance
__A = num_conv_pos_embeddings
__A = num_conv_pos_embedding_groups
__A = len(self.conv_dim )
__A = num_hidden_layers
__A = intermediate_size
__A = hidden_act
__A = num_attention_heads
__A = hidden_dropout
__A = attention_dropout
__A = activation_dropout
__A = feat_proj_dropout
__A = final_dropout
__A = layerdrop
__A = layer_norm_eps
__A = initializer_range
__A = num_ctc_classes
__A = vocab_size
__A = do_stable_layer_norm
__A = use_weighted_layer_sum
__A = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
F" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"
F" `len(config.conv_kernel) = {len(self.conv_kernel )}`." )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__A = apply_spec_augment
__A = mask_time_prob
__A = mask_time_length
__A = mask_time_min_masks
__A = mask_feature_prob
__A = mask_feature_length
# parameters for pretraining with codevector quantized representations
__A = num_codevectors_per_group
__A = num_codevector_groups
__A = contrastive_logits_temperature
__A = num_negatives
__A = codevector_dim
__A = proj_codevector_dim
__A = diversity_loss_weight
# ctc loss
__A = ctc_loss_reduction
__A = ctc_zero_infinity
# adapter
__A = add_adapter
__A = adapter_kernel_size
__A = adapter_stride
__A = num_adapter_layers
__A = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__A = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__A = list(UpperCamelCase_ )
__A = list(UpperCamelCase_ )
__A = list(UpperCamelCase_ )
__A = xvector_output_dim
@property
def lowerCAmelCase_ ( self : str ):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 702 |
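The property at the end of the config multiplies the convolutional strides, i.e. the total downsampling factor between raw waveform samples and encoder frames (upstream this is exposed as inputs_to_logits_ratio); for the default strides it works out to 320:

import functools
import operator

conv_stride = (5, 2, 2, 2, 2, 2, 2)  # the default above
assert functools.reduce(operator.mul, conv_stride, 1) == 320  # 16 kHz audio -> 50 frames/s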
from __future__ import annotations
def _SCREAMING_SNAKE_CASE ( __lowercase : list[int] , __lowercase : int ) -> list[list[int]]:
"""simple docstring"""
__A = []
__A = []
__A = 0
__A = sum(__lowercase )
create_state_space_tree(__lowercase , __lowercase , __lowercase , __lowercase , __lowercase , __lowercase )
return result
def _SCREAMING_SNAKE_CASE ( __lowercase : list[int] , __lowercase : int , __lowercase : int , __lowercase : list[int] , __lowercase : list[list[int]] , __lowercase : int , ) -> None:
"""simple docstring"""
if sum(__lowercase ) > max_sum or (remaining_nums_sum + sum(__lowercase )) < max_sum:
return
if sum(__lowercase ) == max_sum:
result.append(__lowercase )
return
for index in range(__lowercase , len(__lowercase ) ):
create_state_space_tree(
__lowercase , __lowercase , index + 1 , [*path, nums[index]] , __lowercase , remaining_nums_sum - nums[index] , )
__a : str = [3, 34, 4, 12, 5, 2]
__a : Optional[Any] = 9
__a : List[str] = generate_sum_of_subsets_soln(nums, max_sum)
print(*result)
| 199 | 0 |
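For the driver values above (nums = [3, 34, 4, 12, 5, 2], max_sum = 9), the backtracking search should print the two qualifying subsets; a quick independent cross-check (not part of the file) using itertools:

from itertools import combinations

nums, max_sum = [3, 34, 4, 12, 5, 2], 9
expected = [list(c) for r in range(1, len(nums) + 1)
            for c in combinations(nums, r) if sum(c) == max_sum]
assert expected == [[4, 5], [3, 4, 2]]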
import time
import warnings
from abc import ABC
from copy import deepcopy
from typing import Optional
import torch
from ..utils import add_start_docstrings, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = r'''
Args:
input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
[`PreTrainedTokenizer.__call__`] for details.
[What are input IDs?](../glossary#input-ids)
scores (`torch.FloatTensor` of shape `(batch_size, config.vocab_size)`):
Prediction scores of a language modeling head. These can be scores for each vocabulary token before SoftMax
or scores for each vocabulary token after SoftMax.
kwargs (`Dict[str, Any]`, *optional*):
Additional stopping criteria specific kwargs.
Return:
`bool`. `False` indicates we should continue, `True` indicates we should stop.
'''
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
raise NotImplementedError('''StoppingCriteria needs to be subclassed''' )
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Optional[int]:
"""simple docstring"""
snake_case__ : Optional[int] =max_length
snake_case__ : Dict =max_position_embeddings
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
snake_case__ : str =input_ids.shape[-1]
snake_case__ : Union[str, Any] =cur_len >= self.max_length
if self.max_position_embeddings is not None and not is_done and cur_len >= self.max_position_embeddings:
logger.warning_once(
'''This is a friendly reminder - the current text generation call will exceed the model\'s predefined '''
f'''maximum length ({self.max_position_embeddings}). Depending on the model, you may observe '''
'''exceptions, performance degradation, or nothing at all.''' )
return is_done
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
"""simple docstring"""
warnings.warn(
'''The class `MaxNewTokensCriteria` is deprecated. '''
f'''Please use `MaxLengthCriteria(max_length={start_length + max_new_tokens})` '''
'''with `max_length = start_length + max_new_tokens` instead.''' , __SCREAMING_SNAKE_CASE , )
snake_case__ : Union[str, Any] =start_length
snake_case__ : Any =max_new_tokens
snake_case__ : List[Any] =start_length + max_new_tokens
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return input_ids.shape[-1] >= self.max_length
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
def __init__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None ) -> Optional[Any]:
"""simple docstring"""
snake_case__ : Dict =max_time
snake_case__ : List[Any] =time.time() if initial_timestamp is None else initial_timestamp
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return time.time() - self.initial_timestamp > self.max_time
class _lowerCAmelCase ( __UpperCamelCase ):
"""simple docstring"""
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
def __call__( self , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE ) -> bool:
"""simple docstring"""
return any(criteria(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for criteria in self )
@property
def UpperCAmelCase ( self ) -> Optional[int]:
"""simple docstring"""
for stopping_criterium in self:
# upstream this checks isinstance against MaxLengthCriteria and MaxNewTokensCriteria;
# both expose `max_length`, so duck-typing on that attribute preserves the intent here
if hasattr(stopping_criterium , """max_length""" ):
return stopping_criterium.max_length
return None
def lowercase_ ( SCREAMING_SNAKE_CASE : StoppingCriteriaList , SCREAMING_SNAKE_CASE : int ):
"""simple docstring"""
snake_case__ : List[str] =stopping_criteria.max_length
snake_case__ : Dict =deepcopy(SCREAMING_SNAKE_CASE )
if stopping_max_length is not None and stopping_max_length != max_length:
warnings.warn('''You set different `max_length` for stopping criteria and `max_length` parameter''' , SCREAMING_SNAKE_CASE )
elif stopping_max_length is None:
new_stopping_criteria.append(MaxLengthCriteria(max_length=SCREAMING_SNAKE_CASE ) )
return new_stopping_criteria
| 381 |
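The renamed classes above correspond to transformers' StoppingCriteria, MaxLengthCriteria, MaxNewTokensCriteria, MaxTimeCriteria and StoppingCriteriaList; a hedged usage sketch against the real library API rather than the obfuscated names:

import torch
from transformers import MaxLengthCriteria, StoppingCriteriaList

criteria = StoppingCriteriaList([MaxLengthCriteria(max_length=20)])
input_ids = torch.ones((1, 20), dtype=torch.long)  # pretend 20 tokens were generated
scores = torch.zeros((1, 50))                      # dummy vocabulary scores
assert criteria(input_ids, scores)  # sequence length 20 >= max_length -> stop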
import argparse
import re
from pathlib import Path
import requests
import torch
from PIL import Image
from torchvision.transforms import CenterCrop, Compose, Normalize, Resize, ToTensor
from transformers import (
EfficientFormerConfig,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerImageProcessor,
)
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[int] , SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
snake_case__ : Union[str, Any] =old_name
if "patch_embed" in old_name:
snake_case__, snake_case__, snake_case__ : int =old_name.split('''.''' )
if layer == "0":
snake_case__ : Tuple =old_name.replace('''0''' , '''convolution1''' )
elif layer == "1":
snake_case__ : int =old_name.replace('''1''' , '''batchnorm_before''' )
elif layer == "3":
snake_case__ : str =old_name.replace('''3''' , '''convolution2''' )
else:
snake_case__ : Tuple =old_name.replace('''4''' , '''batchnorm_after''' )
if "network" in old_name and re.search(R'''\d\.\d''' , SCREAMING_SNAKE_CASE ):
snake_case__ : Union[str, Any] =R'''\b\d{2}\b'''
if bool(re.search(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) ):
snake_case__ : Any =re.search(R'''\d\.\d\d.''' , SCREAMING_SNAKE_CASE ).group()
else:
snake_case__ : List[Any] =re.search(R'''\d\.\d.''' , SCREAMING_SNAKE_CASE ).group()
if int(match[0] ) < 6:
snake_case__ : int =old_name.replace(SCREAMING_SNAKE_CASE , '''''' )
snake_case__ : Tuple =trimmed_name.replace('''network''' , match[0] + '''.meta4D_layers.blocks.''' + match[2:-1] )
snake_case__ : Union[str, Any] ='''intermediate_stages.''' + trimmed_name
else:
snake_case__ : Optional[int] =old_name.replace(SCREAMING_SNAKE_CASE , '''''' )
if int(match[2] ) < num_meta4D_last_stage:
snake_case__ : List[Any] =trimmed_name.replace('''network''' , '''meta4D_layers.blocks.''' + match[2] )
else:
snake_case__ : Optional[Any] =str(int(match[2] ) - num_meta4D_last_stage )
snake_case__ : List[Any] =trimmed_name.replace('''network''' , '''meta3D_layers.blocks.''' + layer_index )
if "norm1" in old_name:
snake_case__ : Tuple =trimmed_name.replace('''norm1''' , '''layernorm1''' )
elif "norm2" in old_name:
snake_case__ : Union[str, Any] =trimmed_name.replace('''norm2''' , '''layernorm2''' )
elif "fc1" in old_name:
snake_case__ : str =trimmed_name.replace('''fc1''' , '''linear_in''' )
elif "fc2" in old_name:
snake_case__ : Optional[Any] =trimmed_name.replace('''fc2''' , '''linear_out''' )
snake_case__ : Dict ='''last_stage.''' + trimmed_name
elif "network" in old_name and re.search(R'''.\d.''' , SCREAMING_SNAKE_CASE ):
snake_case__ : int =old_name.replace('''network''' , '''intermediate_stages''' )
if "fc" in new_name:
snake_case__ : Union[str, Any] =new_name.replace('''fc''' , '''convolution''' )
elif ("norm1" in new_name) and ("layernorm1" not in new_name):
snake_case__ : Any =new_name.replace('''norm1''' , '''batchnorm_before''' )
elif ("norm2" in new_name) and ("layernorm2" not in new_name):
snake_case__ : Dict =new_name.replace('''norm2''' , '''batchnorm_after''' )
if "proj" in new_name:
snake_case__ : List[Any] =new_name.replace('''proj''' , '''projection''' )
if "dist_head" in new_name:
snake_case__ : Union[str, Any] =new_name.replace('''dist_head''' , '''distillation_classifier''' )
elif "head" in new_name:
snake_case__ : List[Any] =new_name.replace('''head''' , '''classifier''' )
elif "patch_embed" in new_name:
snake_case__ : Union[str, Any] ='''efficientformer.''' + new_name
elif new_name == "norm.weight" or new_name == "norm.bias":
snake_case__ : int =new_name.replace('''norm''' , '''layernorm''' )
snake_case__ : Dict ='''efficientformer.''' + new_name
else:
snake_case__ : List[Any] ='''efficientformer.encoder.''' + new_name
return new_name
def lowercase_ ( SCREAMING_SNAKE_CASE : Optional[Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
"""simple docstring"""
for key in checkpoint.copy().keys():
snake_case__ : List[Any] =checkpoint.pop(SCREAMING_SNAKE_CASE )
snake_case__ : Any =val
return checkpoint
def lowercase_ ( ):
"""simple docstring"""
snake_case__ : int ='''http://images.cocodataset.org/val2017/000000039769.jpg'''
snake_case__ : Optional[int] =Image.open(requests.get(SCREAMING_SNAKE_CASE , stream=SCREAMING_SNAKE_CASE ).raw )
return image
def lowercase_ ( SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : Path , SCREAMING_SNAKE_CASE : bool ):
"""simple docstring"""
snake_case__ : Union[str, Any] =torch.load(SCREAMING_SNAKE_CASE , map_location='''cpu''' )['''model''']
snake_case__ : int =EfficientFormerConfig.from_json_file(SCREAMING_SNAKE_CASE )
snake_case__ : Dict =EfficientFormerForImageClassificationWithTeacher(SCREAMING_SNAKE_CASE )
snake_case__ : Union[str, Any] ='''_'''.join(checkpoint_path.split('''/''' )[-1].split('''.''' )[0].split('''_''' )[:-1] )
snake_case__ : List[Any] =config.depths[-1] - config.num_metaad_blocks + 1
snake_case__ : Dict =convert_torch_checkpoint(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
model.load_state_dict(SCREAMING_SNAKE_CASE )
model.eval()
snake_case__ : Optional[int] ={
'''bilinear''': PILImageResampling.BILINEAR,
'''bicubic''': PILImageResampling.BICUBIC,
'''nearest''': PILImageResampling.NEAREST,
}
# prepare image
snake_case__ : Any =prepare_img()
snake_case__ : str =2_56
snake_case__ : List[Any] =2_24
snake_case__ : Any =EfficientFormerImageProcessor(
size={'''shortest_edge''': image_size} , crop_size={'''height''': crop_size, '''width''': crop_size} , resample=pillow_resamplings['''bicubic'''] , )
snake_case__ : int =processor(images=SCREAMING_SNAKE_CASE , return_tensors='''pt''' ).pixel_values
# original processing pipeline
snake_case__ : List[str] =Compose(
[
Resize(SCREAMING_SNAKE_CASE , interpolation=pillow_resamplings['''bicubic'''] ),
CenterCrop(SCREAMING_SNAKE_CASE ),
ToTensor(),
Normalize(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ),
] )
snake_case__ : Tuple =image_transforms(SCREAMING_SNAKE_CASE ).unsqueeze(0 )
assert torch.allclose(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
snake_case__ : Optional[Any] =model(SCREAMING_SNAKE_CASE )
snake_case__ : List[Any] =outputs.logits
snake_case__ : Optional[Any] =(1, 10_00)
if "l1" in model_name:
snake_case__ : Union[str, Any] =torch.Tensor(
[-0.1312, 0.4353, -1.0499, -0.5124, 0.4183, -0.6793, -1.3777, -0.0893, -0.7358, -2.4328] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l3" in model_name:
snake_case__ : Optional[Any] =torch.Tensor(
[-1.3150, -1.5456, -1.2556, -0.8496, -0.7127, -0.7897, -0.9728, -0.3052, 0.3751, -0.3127] )
assert torch.allclose(logits[0, :10] , SCREAMING_SNAKE_CASE , atol=1E-3 )
assert logits.shape == expected_shape
elif "l7" in model_name:
snake_case__ : Dict =torch.Tensor(
[-1.0283, -1.4131, -0.5644, -1.3115, -0.5785, -1.2049, -0.7528, 0.1992, -0.3822, -0.0878] )
assert logits.shape == expected_shape
else:
raise ValueError(
F'''Unknown model checkpoint: {checkpoint_path}. Supported version of efficientformer are l1, l3 and l7''' )
# Save Checkpoints
Path(SCREAMING_SNAKE_CASE ).mkdir(exist_ok=SCREAMING_SNAKE_CASE )
model.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'''Checkpoint successfully converted. Model saved at {pytorch_dump_path}''' )
processor.save_pretrained(SCREAMING_SNAKE_CASE )
print(F'''Processor successfully saved at {pytorch_dump_path}''' )
if push_to_hub:
print('''Pushing model to the hub...''' )
model.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add model''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
processor.push_to_hub(
repo_id=F'''Bearnardd/{pytorch_dump_path}''' , commit_message='''Add image processor''' , use_temp_dir=SCREAMING_SNAKE_CASE , )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--pytorch_model_path''',
default=None,
type=str,
required=True,
help='''Path to EfficientFormer pytorch checkpoint.''',
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The json file for EfficientFormer model config.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Push model and image processor to the hub''')
parser.add_argument(
'''--no-push_to_hub''',
dest='''push_to_hub''',
action='''store_false''',
help='''Do not push model and image processor to the hub''',
)
parser.set_defaults(push_to_hub=True)
lowerCamelCase__ = parser.parse_args()
convert_efficientformer_checkpoint(
checkpoint_path=args.pytorch_model_path,
efficientformer_config_file=args.config_file,
pytorch_dump_path=args.pytorch_dump_path,
push_to_hub=args.push_to_hub,
)
| 381 | 1 |
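The first helper above is a regex-driven key renamer for the checkpoint state dict; a minimal standalone illustration of the core substitution it performs (the stage/block names here are simplified for the demo, not the converter's exact output):

import re

def demo_rename(old_name: str) -> str:
    # "network.<stage>.<block>...." -> stage-scoped module path
    return re.sub(r"network\.(\d+)\.(\d+)", r"intermediate_stages.\1.blocks.\2", old_name)

assert demo_rename("network.0.1.fc1.weight") == "intermediate_stages.0.blocks.1.fc1.weight"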
import numpy as np
import pandas as pd
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVR
from statsmodels.tsa.statespace.sarimax import SARIMAX
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case , snake_case ):
_lowerCAmelCase = np.array([[1, item, train_mtch[i]] for i, item in enumerate(snake_case )] )
_lowerCAmelCase = np.array(snake_case )
_lowerCAmelCase = np.dot(np.dot(np.linalg.inv(np.dot(x.transpose() , snake_case ) ) , x.transpose() ) , snake_case )
return abs(beta[0] + test_dt[0] * beta[1] + test_mtch[0] + beta[2] )
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
_lowerCAmelCase = (1, 2, 1)
_lowerCAmelCase = (1, 1, 0, 7)
_lowerCAmelCase = SARIMAX(
snake_case , exog=snake_case , order=snake_case , seasonal_order=snake_case )
_lowerCAmelCase = model.fit(disp=snake_case , maxiter=600 , method='nm' )
_lowerCAmelCase = model_fit.predict(1 , len(snake_case ) , exog=[test_match] )
return result[0]
def _lowerCamelCase ( snake_case , snake_case , snake_case ):
_lowerCAmelCase = SVR(kernel='rbf' , C=1 , gamma=0.1 , epsilon=0.1 )
regressor.fit(snake_case , snake_case )
_lowerCAmelCase = regressor.predict(snake_case )
return y_pred[0]
def _lowerCamelCase ( snake_case ):
train_user.sort()
_lowerCAmelCase = np.percentile(snake_case , 25 )
_lowerCAmelCase = np.percentile(snake_case , 75 )
_lowerCAmelCase = qa - qa
_lowerCAmelCase = qa - (iqr * 0.1)
return low_lim
def _lowerCamelCase ( snake_case , snake_case ):
_lowerCAmelCase = 0
_lowerCAmelCase = 0
for i in list_vote:
if i > actual_result:
_lowerCAmelCase = not_safe + 1
else:
if abs(abs(snake_case ) - abs(snake_case ) ) <= 0.1:
safe += 1
else:
not_safe += 1
return safe > not_safe
if __name__ == "__main__":
# data_input_df = pd.read_csv("ex_data.csv", header=None)
_lowercase: List[Any] = [[1_8_2_3_1, 0.0, 1], [2_2_6_2_1, 1.0, 2], [1_5_6_7_5, 0.0, 3], [2_3_5_8_3, 1.0, 4]]
_lowercase: Optional[Any] = pd.DataFrame(
data_input, columns=['''total_user''', '''total_even''', '''days''']
)
_lowercase: str = Normalizer().fit_transform(data_input_df.values)
# split data
_lowercase: Any = normalize_df[:, 2].tolist()
_lowercase: Tuple = normalize_df[:, 0].tolist()
_lowercase: Tuple = normalize_df[:, 1].tolist()
# for svr (input variable = total date and total match)
_lowercase: Tuple = normalize_df[:, [1, 2]].tolist()
_lowercase: Optional[int] = x[: len(x) - 1]
_lowercase: Tuple = x[len(x) - 1 :]
# for linear regression & sarimax
_lowercase: List[str] = total_date[: len(total_date) - 1]
_lowercase: Dict = total_user[: len(total_user) - 1]
_lowercase: Union[str, Any] = total_match[: len(total_match) - 1]
_lowercase: int = total_date[len(total_date) - 1 :]
_lowercase: Optional[int] = total_user[len(total_user) - 1 :]
_lowercase: List[str] = total_match[len(total_match) - 1 :]
# voting system with forecasting
_lowercase: List[Any] = [
linear_regression_prediction(
trn_date, trn_user, trn_match, tst_date, tst_match
),
sarimax_predictor(trn_user, trn_match, tst_match),
support_vector_regressor(x_train, x_test, trn_user),
]
# check the safety of today's data
_lowercase: Any = '''''' if data_safety_checker(res_vote, tst_user) else '''not '''
print(f'''Today\'s data is {_lowercase}safe.''')
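The final safety check is a majority vote over the three forecasts; a self-contained restatement of its rule (upstream name: data_safety_checker), with illustrative numbers:

def is_safe(votes: list[float], actual: float) -> bool:
    # a vote is "safe" if it does not exceed the actual value and is within 0.1 of it
    safe = sum(1 for v in votes if v <= actual and abs(abs(v) - abs(actual)) <= 0.1)
    return safe > len(votes) - safe

assert is_safe([0.10, 0.10, 0.10], 0.105)      # all three votes close and below -> safe
assert not is_safe([0.30, 0.30, 0.05], 0.105)  # two votes overshoot -> not safe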
| 706 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
_lowercase: Optional[Any] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase__ :
UpperCamelCase__ =PegasusConfig
UpperCamelCase__ ={}
UpperCamelCase__ ="gelu"
def __init__( self : Union[str, Any] , lowercase__ : Optional[Any] , lowercase__ : str=13 , lowercase__ : Any=7 , lowercase__ : Tuple=True , lowercase__ : str=False , lowercase__ : Optional[int]=99 , lowercase__ : Optional[int]=32 , lowercase__ : Optional[int]=5 , lowercase__ : Optional[int]=4 , lowercase__ : List[str]=37 , lowercase__ : Dict=0.1 , lowercase__ : Optional[int]=0.1 , lowercase__ : Optional[Any]=20 , lowercase__ : int=2 , lowercase__ : Dict=1 , lowercase__ : Union[str, Any]=0 , ):
_lowerCAmelCase = parent
_lowerCAmelCase = batch_size
_lowerCAmelCase = seq_length
_lowerCAmelCase = is_training
_lowerCAmelCase = use_labels
_lowerCAmelCase = vocab_size
_lowerCAmelCase = hidden_size
_lowerCAmelCase = num_hidden_layers
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = intermediate_size
_lowerCAmelCase = hidden_dropout_prob
_lowerCAmelCase = attention_probs_dropout_prob
_lowerCAmelCase = max_position_embeddings
_lowerCAmelCase = eos_token_id
_lowerCAmelCase = pad_token_id
_lowerCAmelCase = bos_token_id
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_lowerCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_lowerCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_lowerCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_lowerCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_lowerCAmelCase = prepare_pegasus_inputs_dict(lowercase__ , lowercase__ , lowercase__ )
return config, inputs_dict
def SCREAMING_SNAKE_CASE__ ( self : int , lowercase__ : Any , lowercase__ : Optional[Any] , lowercase__ : List[Any] ):
_lowerCAmelCase = 20
_lowerCAmelCase = model_class_name(lowercase__ )
_lowerCAmelCase = model.encode(inputs_dict['input_ids'] )
_lowerCAmelCase , _lowerCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
_lowerCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype='i4' )
_lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
_lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowercase__ , )
_lowerCAmelCase = model.decode(lowercase__ , lowercase__ )
_lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , lowercase__ : Tuple , lowercase__ : List[str] , lowercase__ : str ):
_lowerCAmelCase = 20
_lowerCAmelCase = model_class_name(lowercase__ )
_lowerCAmelCase = model.encode(inputs_dict['input_ids'] )
_lowerCAmelCase , _lowerCAmelCase = (
inputs_dict['decoder_input_ids'],
inputs_dict['decoder_attention_mask'],
)
_lowerCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_lowerCAmelCase = model.init_cache(decoder_input_ids.shape[0] , lowercase__ , lowercase__ )
_lowerCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, :-1] , lowercase__ , decoder_attention_mask=lowercase__ , past_key_values=lowercase__ , decoder_position_ids=lowercase__ , )
_lowerCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype='i4' )
_lowerCAmelCase = model.decode(
decoder_input_ids[:, -1:] , lowercase__ , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowercase__ , decoder_position_ids=lowercase__ , )
_lowerCAmelCase = model.decode(lowercase__ , lowercase__ , decoder_attention_mask=lowercase__ )
_lowerCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'Max diff is {diff}' )
def _lowerCamelCase ( snake_case , snake_case , snake_case , snake_case=None , snake_case=None , ):
if attention_mask is None:
_lowerCAmelCase = np.not_equal(snake_case , config.pad_token_id ).astype(np.int8 )
if decoder_attention_mask is None:
_lowerCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCamelCase__ ( UpperCAmelCase ,unittest.TestCase ):
UpperCamelCase__ =(
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
UpperCamelCase__ =(FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
UpperCamelCase__ =True
UpperCamelCase__ =False
UpperCamelCase__ =False
UpperCamelCase__ =False
def SCREAMING_SNAKE_CASE__ ( self : str ):
_lowerCAmelCase = FlaxPegasusModelTester(self )
_lowerCAmelCase = ConfigTester(self , config_class=lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
self.config_tester.run_common_tests()
def SCREAMING_SNAKE_CASE__ ( self : Tuple ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowercase__ , lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Dict ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowercase__ , lowercase__ , lowercase__ )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = self._prepare_for_class(lowercase__ , lowercase__ )
_lowerCAmelCase = model_class(lowercase__ )
@jax.jit
def encode_jitted(lowercase__ : int , lowercase__ : List[str]=None , **lowercase__ : Optional[Any] ):
return model.encode(input_ids=lowercase__ , attention_mask=lowercase__ )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase = encode_jitted(**lowercase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase = encode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
def SCREAMING_SNAKE_CASE__ ( self : int ):
_lowerCAmelCase , _lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_lowerCAmelCase = model_class(lowercase__ )
_lowerCAmelCase = model.encode(inputs_dict['input_ids'] , inputs_dict['attention_mask'] )
_lowerCAmelCase = {
'decoder_input_ids': inputs_dict['decoder_input_ids'],
'decoder_attention_mask': inputs_dict['decoder_attention_mask'],
'encoder_outputs': encoder_outputs,
}
@jax.jit
def decode_jitted(lowercase__ : Dict , lowercase__ : Optional[Any] , lowercase__ : Union[str, Any] ):
return model.decode(
decoder_input_ids=lowercase__ , decoder_attention_mask=lowercase__ , encoder_outputs=lowercase__ , )
with self.subTest('JIT Enabled' ):
_lowerCAmelCase = decode_jitted(**lowercase__ ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
_lowerCAmelCase = decode_jitted(**lowercase__ ).to_tuple()
self.assertEqual(len(lowercase__ ) , len(lowercase__ ) )
for jitted_output, output in zip(lowercase__ , lowercase__ ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ):
for model_class_name in self.all_model_classes:
_lowerCAmelCase = model_class_name.from_pretrained('google/pegasus-large' , from_pt=lowercase__ )
_lowerCAmelCase = np.ones((1, 1) )
_lowerCAmelCase = model(lowercase__ )
self.assertIsNotNone(lowercase__ )
@slow
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
_lowerCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained('google/pegasus-xsum' )
_lowerCAmelCase = PegasusTokenizer.from_pretrained('google/pegasus-xsum' )
_lowerCAmelCase = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
_lowerCAmelCase = [
'California\'s largest electricity provider has turned off power to hundreds of thousands of customers.',
'Pop group N-Dubz have revealed they were surprised to get four nominations for this year\'s Mobo Awards.',
]
_lowerCAmelCase = tokenizer(lowercase__ , return_tensors='np' , truncation=lowercase__ , max_length=5_12 , padding=lowercase__ )
_lowerCAmelCase = model.generate(**lowercase__ , num_beams=2 ).sequences
_lowerCAmelCase = tokenizer.batch_decode(lowercase__ , skip_special_tokens=lowercase__ )
assert tgt_text == decoded
| 225 | 0 |
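For readers following the cache checks above, here is a minimal, self-contained sketch (not the tester itself) of the decoder position-id bookkeeping they rely on: the multi-token "prefill" call covers positions 0..L-2, and the single-token call that consumes past_key_values gets position L-1. Shapes and values below are illustrative.

import jax.numpy as jnp

batch_size, seq_len = 2, 7
decoder_input_ids = jnp.ones((batch_size, seq_len), dtype="i4")

# positions 0 .. L-2 for the prefill call on decoder_input_ids[:, :-1]
prefix_position_ids = jnp.broadcast_to(
    jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :],
    (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1),
)
# position L-1 for the single-token call that reads from the cache
last_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")

print(prefix_position_ids.shape, last_position_ids.shape)  # (2, 6) (2, 1)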
'''simple docstring'''
import sacrebleu as scb
from packaging import version
from sacrebleu import CHRF
import datasets
_CITATION = '\\n@inproceedings{popovic-2015-chrf,\n title = "chr{F}: character n-gram {F}-score for automatic {MT} evaluation",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Tenth Workshop on Statistical Machine Translation",\n month = sep,\n year = "2015",\n address = "Lisbon, Portugal",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W15-3049",\n doi = "10.18653/v1/W15-3049",\n pages = "392--395",\n}\n@inproceedings{popovic-2017-chrf,\n title = "chr{F}++: words helping character n-grams",\n author = "Popovi{\'c}, Maja",\n booktitle = "Proceedings of the Second Conference on Machine Translation",\n month = sep,\n year = "2017",\n address = "Copenhagen, Denmark",\n publisher = "Association for Computational Linguistics",\n url = "https://aclanthology.org/W17-4770",\n doi = "10.18653/v1/W17-4770",\n pages = "612--618",\n}\n@inproceedings{post-2018-call,\n title = "A Call for Clarity in Reporting {BLEU} Scores",\n author = "Post, Matt",\n booktitle = "Proceedings of the Third Conference on Machine Translation: Research Papers",\n month = oct,\n year = "2018",\n address = "Belgium, Brussels",\n publisher = "Association for Computational Linguistics",\n url = "https://www.aclweb.org/anthology/W18-6319",\n pages = "186--191",\n}\n'
_DESCRIPTION = '\\nChrF and ChrF++ are two MT evaluation metrics. They both use the F-score statistic for character n-gram matches,\nand ChrF++ adds word n-grams as well which correlates more strongly with direct assessment. We use the implementation\nthat is already present in sacrebleu.\n\nThe implementation here is slightly different from sacrebleu in terms of the required input format. The length of\nthe references and hypotheses lists need to be the same, so you may need to transpose your references compared to\nsacrebleu\'s required input format. See https://github.com/huggingface/datasets/issues/3154#issuecomment-950746534\n\nSee the README.md file at https://github.com/mjpost/sacreBLEU#chrf--chrf for more information.\n'
_KWARGS_DESCRIPTION = '\nProduces ChrF(++) scores for hypotheses given reference translations.\n\nArgs:\n predictions (list of str): The predicted sentences.\n references (list of list of str): The references. There should be one reference sub-list for each prediction sentence.\n char_order (int): Character n-gram order. Defaults to `6`.\n word_order (int): Word n-gram order. If equals to `2`, the metric is referred to as chrF++. Defaults to `0`.\n beta (int): Determine the importance of recall w.r.t precision. Defaults to `2`.\n lowercase (bool): if `True`, enables case-insensitivity. Defaults to `False`.\n whitespace (bool): If `True`, include whitespaces when extracting character n-grams.\n eps_smoothing (bool): If `True`, applies epsilon smoothing similar\n to reference chrF++.py, NLTK and Moses implementations. If `False`,\n it takes into account effective match order similar to sacreBLEU < 2.0.0. Defaults to `False`.\n\nReturns:\n \'score\' (float): The chrF (chrF++) score,\n \'char_order\' (int): The character n-gram order,\n \'word_order\' (int): The word n-gram order. If equals to 2, the metric is referred to as chrF++,\n \'beta\' (int): Determine the importance of recall w.r.t precision\n\nExamples:\n Example 1--a simple example of calculating chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction, references=reference)\n >>> print(results)\n {\'score\': 84.64214891738334, \'char_order\': 6, \'word_order\': 0, \'beta\': 2}\n\n Example 2--the same example, but with the argument word_order=2, to calculate chrF++ instead of chrF:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2)\n >>> print(results)\n {\'score\': 82.87263732906315, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n\n Example 3--the same chrF++ example as above, but with `lowercase=True` to normalize all case:\n >>> prediction = ["The relationship between cats and dogs is not exactly friendly.", "a good bookshop is just a genteel black hole that knows how to read."]\n >>> reference = [["The relationship between dogs and cats is not exactly friendly."], ["A good bookshop is just a genteel Black Hole that knows how to read."]]\n >>> chrf = datasets.load_metric("chrf")\n >>> results = chrf.compute(predictions=prediction,\n ... references=reference,\n ... word_order=2,\n ... lowercase=True)\n >>> print(results)\n {\'score\': 92.12853119829202, \'char_order\': 6, \'word_order\': 2, \'beta\': 2}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class UpperCAmelCase_ ( datasets.Metric ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE__ ( self ) -> List[Any]:
'''simple docstring'''
if version.parse(scb.__version__ ) < version.parse("1.4.12" ):
raise ImportWarning(
"To use `sacrebleu`, the module `sacrebleu>=1.4.12` is required, and the current version of `sacrebleu` doesn't match this condition.\n"
"You can install it with `pip install \"sacrebleu>=1.4.12\"`." )
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/mjpost/sacreBLEU#chrf--chrf" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Sequence(datasets.Value("string" , id="sequence" ) , id="references" ),
} ) , codebase_urls=["https://github.com/mjpost/sacreBLEU#chrf--chrf"] , reference_urls=[
"https://github.com/m-popovic/chrF",
] , )
    def _compute(
        self,
        predictions,
        references,
        char_order=CHRF.CHAR_ORDER,
        word_order=CHRF.WORD_ORDER,
        beta=CHRF.BETA,
        lowercase=False,
        whitespace=False,
        eps_smoothing=False,
    ):
        """Compute chrF(++) with sacrebleu's CHRF implementation."""
        references_per_prediction = len(references[0])
        if any(len(refs) != references_per_prediction for refs in references):
            raise ValueError("Sacrebleu requires the same number of references for each prediction")
        # transpose: the i-th reference of every prediction forms one reference stream
        transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
        sb_chrf = CHRF(char_order, word_order, beta, lowercase, whitespace, eps_smoothing)
        output = sb_chrf.corpus_score(predictions, transformed_references)
        return {
            "score": output.score,
            "char_order": output.char_order,
            "word_order": output.word_order,
            "beta": output.beta,
        }
| 173 |
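As a companion to the metric wrapper above, a short usage sketch of calling sacrebleu's CHRF directly; the reference transposition mirrors the list comprehension in `_compute`, and the sentences here are illustrative only.

from sacrebleu import CHRF

predictions = ['The cat sat on the mat.']
references = [['The cat is sitting on the mat.']]  # one reference sub-list per prediction

# transpose: the i-th reference of every prediction forms one reference stream
transformed_references = [[refs[i] for refs in references] for i in range(len(references[0]))]

chrf = CHRF(word_order=2)  # word_order=2 gives chrF++
print(chrf.corpus_score(predictions, transformed_references).score)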
'''simple docstring'''
def compute_ap(l):  # noqa: E741
    """Print every articulation point of an undirected graph given as an adjacency list."""
    n = len(l)
    out_edge_count = 0
    low = [0] * n
    visited = [False] * n
    is_art = [False] * n

    def dfs(root, at, parent, out_edge_count):
        if parent == root:
            out_edge_count += 1
        visited[at] = True
        low[at] = at

        for to in l[at]:
            if to == parent:
                pass
            elif not visited[to]:
                out_edge_count = dfs(root, to, at, out_edge_count)
                low[at] = min(low[at], low[to])

                # AP found via bridge
                if at < low[to]:
                    is_art[at] = True
                # AP found via cycle
                if at == low[to]:
                    is_art[at] = True
            else:
                low[at] = min(low[at], to)
        return out_edge_count

    for i in range(n):
        if not visited[i]:
            out_edge_count = 0
            out_edge_count = dfs(i, i, -1, out_edge_count)
            # the DFS root is an articulation point only if it has more than one outgoing tree edge
            is_art[i] = out_edge_count > 1

    for x in range(len(is_art)):
        if is_art[x] is True:
            print(x)
# Adjacency list of graph
data = {
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
}
compute_ap(data)
| 173 | 1 |
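An optional cross-check of the DFS above, assuming networkx is installed (networkx is not used by the snippet itself): its articulation_points should report the same cut vertices for the sample graph.

import networkx as nx

data = {0: [1, 2], 1: [0, 2], 2: [0, 1, 3, 5], 3: [2, 4], 4: [3], 5: [2, 6, 8], 6: [5, 7], 7: [6, 8], 8: [5, 7]}
graph = nx.Graph(data)  # dict-of-lists adjacency is accepted directly
print(sorted(nx.articulation_points(graph)))  # expected: [2, 3, 5]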
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase__ :
'''simple docstring'''
def __init__(self ,__lowerCamelCase ,__lowerCamelCase=13 ,__lowerCamelCase=30 ,__lowerCamelCase=2 ,__lowerCamelCase=3 ,__lowerCamelCase=True ,__lowerCamelCase=True ,__lowerCamelCase=32 ,__lowerCamelCase=5 ,__lowerCamelCase=4 ,__lowerCamelCase=37 ,__lowerCamelCase="gelu" ,__lowerCamelCase=0.1 ,__lowerCamelCase=0.1 ,__lowerCamelCase=10 ,__lowerCamelCase=0.02 ,__lowerCamelCase=None ,__lowerCamelCase=2 ,) -> Optional[Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : int = patch_size
lowerCAmelCase__ : Optional[Any] = num_channels
lowerCAmelCase__ : Dict = is_training
lowerCAmelCase__ : Tuple = use_labels
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : Any = num_hidden_layers
lowerCAmelCase__ : int = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : str = hidden_act
lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
lowerCAmelCase__ : Dict = attention_probs_dropout_prob
lowerCAmelCase__ : List[str] = type_sequence_label_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Any = scope
lowerCAmelCase__ : Tuple = encoder_stride
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Optional[Any] = (image_size // patch_size) ** 2
lowerCAmelCase__ : str = num_patches + 1
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ : Dict = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=__lowerCamelCase ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = ViTModel(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : List[str] = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ : Dict = ViTForMaskedImageModeling(config=__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : int = model(__lowerCamelCase )
self.parent.assertEqual(
result.reconstruction.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : List[Any] = ViTForMaskedImageModeling(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : int = model(__lowerCamelCase )
self.parent.assertEqual(result.reconstruction.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : int = self.type_sequence_label_size
lowerCAmelCase__ : Dict = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : str = model(__lowerCamelCase ,labels=__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : Optional[Any] = ViTForImageClassification(__lowerCamelCase )
model.to(__lowerCamelCase )
model.eval()
lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : Any = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
lowerCAmelCase__ : Any = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
'''simple docstring'''
snake_case_ =(
(
ViTModel,
ViTForImageClassification,
ViTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
snake_case_ =(
{"""feature-extraction""": ViTModel, """image-classification""": ViTForImageClassification}
if is_torch_available()
else {}
)
snake_case_ =True
snake_case_ =False
snake_case_ =False
snake_case_ =False
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : Dict = ViTModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self ,config_class=__lowerCamelCase ,has_text_modality=__lowerCamelCase ,hidden_size=37 )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason='''ViT does not use inputs_embeds''' )
def lowerCAmelCase__ (self ) -> Optional[Any]:
"""simple docstring"""
pass
def lowerCAmelCase__ (self ) -> int:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(__lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__lowerCamelCase ,nn.Linear ) )
def lowerCAmelCase__ (self ) -> Dict:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(__lowerCamelCase )
lowerCAmelCase__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Any = ['''pixel_values''']
self.assertListEqual(arg_names[:1] ,__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__lowerCamelCase )
def lowerCAmelCase__ (self ) -> str:
"""simple docstring"""
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCamelCase )
@slow
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = ViTModel.from_pretrained(__lowerCamelCase )
self.assertIsNotNone(__lowerCamelCase )
def prepare_img():
    image = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''')
    return image
@require_torch
@require_vision
class lowerCamelCase__ ( unittest.TestCase):
'''simple docstring'''
@cached_property
def lowerCAmelCase__ (self ) -> Any:
"""simple docstring"""
return ViTImageProcessor.from_pretrained('''google/vit-base-patch16-224''' ) if is_vision_available() else None
@slow
def lowerCAmelCase__ (self ) -> Union[str, Any]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ViTForImageClassification.from_pretrained('''google/vit-base-patch16-224''' ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = self.default_image_processor
lowerCAmelCase__ : Any = prepare_img()
lowerCAmelCase__ : List[str] = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' ).to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(**__lowerCamelCase )
# verify the logits
lowerCAmelCase__ : Union[str, Any] = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape ,__lowerCamelCase )
lowerCAmelCase__ : List[str] = torch.tensor([-0.2744, 0.8215, -0.0836] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCamelCase ,atol=1e-4 ) )
@slow
def lowerCAmelCase__ (self ) -> Optional[int]:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = ViTModel.from_pretrained('''facebook/dino-vits8''' ).to(__lowerCamelCase )
lowerCAmelCase__ : Tuple = ViTImageProcessor.from_pretrained('''facebook/dino-vits8''' ,size=4_80 )
lowerCAmelCase__ : Optional[Any] = prepare_img()
lowerCAmelCase__ : str = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' )
lowerCAmelCase__ : Tuple = inputs.pixel_values.to(__lowerCamelCase )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Optional[int] = model(__lowerCamelCase ,interpolate_pos_encoding=__lowerCamelCase )
# verify the logits
lowerCAmelCase__ : Optional[int] = torch.Size((1, 36_01, 3_84) )
self.assertEqual(outputs.last_hidden_state.shape ,__lowerCamelCase )
lowerCAmelCase__ : List[str] = torch.tensor(
[[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]] ).to(__lowerCamelCase )
self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3] ,__lowerCamelCase ,atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
lowerCAmelCase__ : Tuple = ViTModel.from_pretrained('''facebook/dino-vits8''' ,torch_dtype=torch.floataa ,device_map='''auto''' )
lowerCAmelCase__ : Optional[int] = self.default_image_processor
lowerCAmelCase__ : str = prepare_img()
lowerCAmelCase__ : Any = image_processor(images=__lowerCamelCase ,return_tensors='''pt''' )
lowerCAmelCase__ : Optional[int] = inputs.pixel_values.to(__lowerCamelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
lowerCAmelCase__ : Tuple = model(__lowerCamelCase )
| 90 |
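A quick numeric check of the ViT sequence-length bookkeeping used by the tester above, with the tester's default sizes: the patch grid plus one [CLS] token.

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2
seq_length = num_patches + 1  # +1 for the [CLS] token
print(num_patches, seq_length)  # 225 226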
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__snake_case : str =logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase__ ( BaseImageProcessor):
'''simple docstring'''
snake_case_ =["""pixel_values"""]
def __init__(self ,__lowerCamelCase = True ,__lowerCamelCase = None ,__lowerCamelCase = PILImageResampling.BICUBIC ,__lowerCamelCase = True ,__lowerCamelCase = None ,__lowerCamelCase = True ,__lowerCamelCase = 1 / 2_55 ,__lowerCamelCase = True ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = True ,**__lowerCamelCase ,) -> None:
"""simple docstring"""
super().__init__(**__lowerCamelCase )
lowerCAmelCase__ : Union[str, Any] = size if size is not None else {'''shortest_edge''': 2_24}
lowerCAmelCase__ : Union[str, Any] = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase )
lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else {'''height''': 2_24, '''width''': 2_24}
lowerCAmelCase__ : Optional[int] = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase ,param_name='''crop_size''' )
lowerCAmelCase__ : Optional[int] = do_resize
lowerCAmelCase__ : Any = size
lowerCAmelCase__ : int = resample
lowerCAmelCase__ : Dict = do_center_crop
lowerCAmelCase__ : str = crop_size
lowerCAmelCase__ : Dict = do_rescale
lowerCAmelCase__ : Optional[Any] = rescale_factor
lowerCAmelCase__ : Dict = do_normalize
lowerCAmelCase__ : Tuple = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
lowerCAmelCase__ : Union[str, Any] = image_std if image_std is not None else OPENAI_CLIP_STD
lowerCAmelCase__ : int = do_convert_rgb
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = PILImageResampling.BICUBIC ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : Optional[int] = get_size_dict(__lowerCamelCase ,default_to_square=__lowerCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
lowerCAmelCase__ : Optional[int] = get_resize_output_image_size(__lowerCamelCase ,size=size['''shortest_edge'''] ,default_to_square=__lowerCamelCase )
return resize(__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> np.ndarray:
"""simple docstring"""
lowerCAmelCase__ : List[Any] = get_size_dict(__lowerCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys (height, width). Got {size.keys()}""" )
return center_crop(__lowerCamelCase ,size=(size['''height'''], size['''width''']) ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> int:
"""simple docstring"""
return rescale(__lowerCamelCase ,scale=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> np.ndarray:
"""simple docstring"""
return normalize(__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ,data_format=__lowerCamelCase ,**__lowerCamelCase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = None ,__lowerCamelCase = ChannelDimension.FIRST ,**__lowerCamelCase ,) -> PIL.Image.Image:
"""simple docstring"""
lowerCAmelCase__ : Tuple = do_resize if do_resize is not None else self.do_resize
lowerCAmelCase__ : Tuple = size if size is not None else self.size
lowerCAmelCase__ : Any = get_size_dict(__lowerCamelCase ,param_name='''size''' ,default_to_square=__lowerCamelCase )
lowerCAmelCase__ : Tuple = resample if resample is not None else self.resample
lowerCAmelCase__ : Dict = do_center_crop if do_center_crop is not None else self.do_center_crop
lowerCAmelCase__ : List[Any] = crop_size if crop_size is not None else self.crop_size
lowerCAmelCase__ : str = get_size_dict(__lowerCamelCase ,param_name='''crop_size''' ,default_to_square=__lowerCamelCase )
lowerCAmelCase__ : Dict = do_rescale if do_rescale is not None else self.do_rescale
lowerCAmelCase__ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
lowerCAmelCase__ : List[Any] = do_normalize if do_normalize is not None else self.do_normalize
lowerCAmelCase__ : Dict = image_mean if image_mean is not None else self.image_mean
lowerCAmelCase__ : int = image_std if image_std is not None else self.image_std
lowerCAmelCase__ : Dict = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
lowerCAmelCase__ : Optional[int] = make_list_of_images(__lowerCamelCase )
if not valid_images(__lowerCamelCase ):
raise ValueError(
'''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
'''torch.Tensor, tf.Tensor or jax.ndarray.''' )
if do_resize and size is None:
raise ValueError('''Size must be specified if do_resize is True.''' )
if do_center_crop and crop_size is None:
raise ValueError('''Crop size must be specified if do_center_crop is True.''' )
if do_rescale and rescale_factor is None:
raise ValueError('''Rescale factor must be specified if do_rescale is True.''' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('''Image mean and std must be specified if do_normalize is True.''' )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
lowerCAmelCase__ : Union[str, Any] = [convert_to_rgb(__lowerCamelCase ) for image in images]
# All transformations expect numpy arrays.
lowerCAmelCase__ : Any = [to_numpy_array(__lowerCamelCase ) for image in images]
if do_resize:
lowerCAmelCase__ : str = [self.resize(image=__lowerCamelCase ,size=__lowerCamelCase ,resample=__lowerCamelCase ) for image in images]
if do_center_crop:
lowerCAmelCase__ : Dict = [self.center_crop(image=__lowerCamelCase ,size=__lowerCamelCase ) for image in images]
if do_rescale:
lowerCAmelCase__ : Union[str, Any] = [self.rescale(image=__lowerCamelCase ,scale=__lowerCamelCase ) for image in images]
if do_normalize:
lowerCAmelCase__ : Optional[Any] = [self.normalize(image=__lowerCamelCase ,mean=__lowerCamelCase ,std=__lowerCamelCase ) for image in images]
lowerCAmelCase__ : List[Any] = [to_channel_dimension_format(__lowerCamelCase ,__lowerCamelCase ) for image in images]
lowerCAmelCase__ : Optional[int] = {'''pixel_values''': images}
return BatchFeature(data=__lowerCamelCase ,tensor_type=__lowerCamelCase )
| 90 | 1 |
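A plain-numpy sketch of the rescale and normalize steps the processor above applies after resizing and cropping; the mean/std values are the standard OpenAI CLIP constants, and the random image stands in for real input.

import numpy as np

image = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)  # stand-in HWC image

image_mean = np.array([0.48145466, 0.4578275, 0.40821073])  # OPENAI_CLIP_MEAN
image_std = np.array([0.26862954, 0.26130258, 0.27577711])  # OPENAI_CLIP_STD

pixels = image.astype(np.float32) * (1 / 255)  # do_rescale
pixels = (pixels - image_mean) / image_std     # do_normalize
pixels = pixels.transpose(2, 0, 1)             # ChannelDimension.FIRST
print(pixels.shape)  # (3, 224, 224)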
'''simple docstring'''
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : str = 1_6
lowerCamelCase : List[str] = 3_2
def _lowerCAmelCase ( _UpperCamelCase : int , _UpperCamelCase : Union[str, Any] = 16 , _UpperCamelCase : Optional[int] = "bert-base-cased" ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =AutoTokenizer.from_pretrained(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =load_dataset('glue' , 'mrpc' )
def tokenize_function(_UpperCamelCase : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
_SCREAMING_SNAKE_CASE =tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=UpperCAmelCase_ , max_length=UpperCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
_SCREAMING_SNAKE_CASE =datasets.map(
UpperCAmelCase_ , batched=UpperCAmelCase_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=UpperCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
_SCREAMING_SNAKE_CASE =tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(_UpperCamelCase : Union[str, Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(UpperCAmelCase_ , padding='max_length' , max_length=1_28 , return_tensors='pt' )
return tokenizer.pad(UpperCAmelCase_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
_SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['train'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =DataLoader(
tokenized_datasets['validation'] , shuffle=UpperCAmelCase_ , collate_fn=UpperCAmelCase_ , batch_size=UpperCAmelCase_ )
return train_dataloader, eval_dataloader
def _lowerCAmelCase ( _UpperCamelCase : Dict , _UpperCamelCase : Dict , _UpperCamelCase : List[str] , _UpperCamelCase : Optional[int] ) -> Optional[int]:
"""simple docstring"""
model.eval()
_SCREAMING_SNAKE_CASE =0
for step, batch in enumerate(UpperCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
_SCREAMING_SNAKE_CASE =model(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
_SCREAMING_SNAKE_CASE =accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(UpperCAmelCase_ ) - 1:
_SCREAMING_SNAKE_CASE =predictions[: len(eval_dataloader.dataset ) - samples_seen]
_SCREAMING_SNAKE_CASE =references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=UpperCAmelCase_ , references=UpperCAmelCase_ , )
_SCREAMING_SNAKE_CASE =metric.compute()
return eval_metric["accuracy"]
def _lowerCAmelCase ( _UpperCamelCase : Optional[int] , _UpperCamelCase : Optional[int] ) -> str:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
_SCREAMING_SNAKE_CASE =config["""lr"""]
_SCREAMING_SNAKE_CASE =int(config['num_epochs'] )
_SCREAMING_SNAKE_CASE =int(config['seed'] )
_SCREAMING_SNAKE_CASE =int(config['batch_size'] )
_SCREAMING_SNAKE_CASE =args.model_name_or_path
set_seed(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =get_dataloaders(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
_SCREAMING_SNAKE_CASE =AutoModelForSequenceClassification.from_pretrained(UpperCAmelCase_ , return_dict=UpperCAmelCase_ )
# Instantiate optimizer
_SCREAMING_SNAKE_CASE =(
AdamW
if accelerator.state.deepspeed_plugin is None
or """optimizer""" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
_SCREAMING_SNAKE_CASE =optimizer_cls(params=model.parameters() , lr=UpperCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
_SCREAMING_SNAKE_CASE =accelerator.state.deepspeed_plugin.deepspeed_config[
"""gradient_accumulation_steps"""
]
else:
_SCREAMING_SNAKE_CASE =1
_SCREAMING_SNAKE_CASE =(len(UpperCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
_SCREAMING_SNAKE_CASE =get_linear_schedule_with_warmup(
optimizer=UpperCAmelCase_ , num_warmup_steps=0 , num_training_steps=UpperCAmelCase_ , )
else:
_SCREAMING_SNAKE_CASE =DummyScheduler(UpperCAmelCase_ , total_num_steps=UpperCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
_SCREAMING_SNAKE_CASE =accelerator.prepare(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
_SCREAMING_SNAKE_CASE =0
# We also need to keep track of the stating epoch so files are named properly
_SCREAMING_SNAKE_CASE =0
_SCREAMING_SNAKE_CASE =evaluate.load('glue' , 'mrpc' )
_SCREAMING_SNAKE_CASE =num_epochs
if args.partial_train_epoch is not None:
_SCREAMING_SNAKE_CASE =args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
_SCREAMING_SNAKE_CASE =args.resume_from_checkpoint.split('epoch_' )[1]
_SCREAMING_SNAKE_CASE =""""""
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
_SCREAMING_SNAKE_CASE =int(UpperCAmelCase_ ) + 1
_SCREAMING_SNAKE_CASE =evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
accelerator.print('resumed checkpoint performance:' , UpperCAmelCase_ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizer\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , f"state_{starting_epoch-1}.json" ) , 'r' ) as f:
_SCREAMING_SNAKE_CASE =json.load(UpperCAmelCase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
_SCREAMING_SNAKE_CASE ={}
for epoch in range(UpperCAmelCase_ , UpperCAmelCase_ ):
model.train()
for step, batch in enumerate(UpperCAmelCase_ ):
_SCREAMING_SNAKE_CASE =model(**UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =outputs.loss
_SCREAMING_SNAKE_CASE =loss / gradient_accumulation_steps
accelerator.backward(UpperCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
_SCREAMING_SNAKE_CASE =f"epoch_{epoch}"
_SCREAMING_SNAKE_CASE =os.path.join(args.output_dir , UpperCAmelCase_ )
accelerator.save_state(UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =evaluation_loop(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_SCREAMING_SNAKE_CASE =accuracy
_SCREAMING_SNAKE_CASE =lr_scheduler.get_lr()[0]
_SCREAMING_SNAKE_CASE =optimizer.param_groups[0]["""lr"""]
_SCREAMING_SNAKE_CASE =epoch
_SCREAMING_SNAKE_CASE =overall_step
accelerator.print(f"epoch {epoch}:" , UpperCAmelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , f"state_{epoch}.json" ) , 'w' ) as f:
json.dump(UpperCAmelCase_ , UpperCAmelCase_ )
def _lowerCAmelCase ( ) -> List[Any]:
"""simple docstring"""
_SCREAMING_SNAKE_CASE =argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
parser.add_argument(
'--model_name_or_path' , type=UpperCAmelCase_ , default='bert-base-cased' , help='Path to pretrained model or model identifier from huggingface.co/models.' , required=UpperCAmelCase_ , )
parser.add_argument(
'--output_dir' , type=UpperCAmelCase_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--partial_train_epoch' , type=UpperCAmelCase_ , default=UpperCAmelCase_ , help='If passed, the training will stop after this number of epochs.' , )
parser.add_argument(
'--num_epochs' , type=UpperCAmelCase_ , default=2 , help='Number of train epochs.' , )
_SCREAMING_SNAKE_CASE =parser.parse_args()
_SCREAMING_SNAKE_CASE ={"""lr""": 2E-5, """num_epochs""": args.num_epochs, """seed""": 42, """batch_size""": 16}
training_function(UpperCAmelCase_ , UpperCAmelCase_ )
if __name__ == "__main__":
main()
| 405 |
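A condensed sketch of the checkpoint/resume contract the script above exercises: state goes into epoch_{n} folders via accelerator.save_state, is restored with accelerator.load_state, and the starting epoch is parsed back out of the folder name. The paths and epoch number here are placeholders.

import os

from accelerate import Accelerator

accelerator = Accelerator()
output_dir, epoch = '.', 0  # placeholder values

checkpoint_dir = os.path.join(output_dir, f'epoch_{epoch}')
accelerator.save_state(checkpoint_dir)  # writes model/optimizer/scheduler/RNG state

# on resume: restore the state and continue from the following epoch
accelerator.load_state(checkpoint_dir)
starting_epoch = int(checkpoint_dir.split('epoch_')[1]) + 1
print(starting_epoch)  # 1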
import unittest
from transformers import DebertaVaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaVaForMaskedLM,
DebertaVaForMultipleChoice,
DebertaVaForQuestionAnswering,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaModel,
)
from transformers.models.deberta_va.modeling_deberta_va import DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase__ (__snake_case ):
"""simple docstring"""
def __init__( self : Tuple , __a : Union[str, Any] , __a : Optional[int]=1_3 , __a : Tuple=7 , __a : Dict=True , __a : List[Any]=True , __a : str=True , __a : str=True , __a : Any=9_9 , __a : Optional[Any]=3_2 , __a : Tuple=5 , __a : Union[str, Any]=4 , __a : List[str]=3_7 , __a : Optional[int]="gelu" , __a : str=0.1 , __a : Union[str, Any]=0.1 , __a : List[Any]=5_1_2 , __a : List[Any]=1_6 , __a : List[Any]=2 , __a : Optional[Any]=0.02 , __a : List[Any]=False , __a : List[Any]=True , __a : Optional[int]="None" , __a : Optional[Any]=3 , __a : Optional[int]=4 , __a : Optional[int]=None , ):
snake_case__ : Dict = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Any = is_training
snake_case__ : int = use_input_mask
snake_case__ : str = use_token_type_ids
snake_case__ : Union[str, Any] = use_labels
snake_case__ : List[Any] = vocab_size
snake_case__ : Tuple = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : List[str] = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : Optional[Any] = num_labels
snake_case__ : Dict = num_choices
snake_case__ : Optional[Any] = relative_attention
snake_case__ : Any = position_biased_input
snake_case__ : Union[str, Any] = pos_att_type
snake_case__ : Optional[int] = scope
def lowercase ( self : Optional[Any] ):
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Dict = None
if self.use_input_mask:
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
snake_case__ : List[Any] = None
if self.use_token_type_ids:
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : str = None
snake_case__ : Union[str, Any] = None
snake_case__ : Optional[int] = None
if self.use_labels:
snake_case__ : Dict = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Any = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : List[str] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowercase ( self : List[Any] ):
return DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def lowercase ( self : Dict , __a : Optional[int] ):
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def lowercase ( self : List[Any] , __a : Tuple , __a : Optional[Any] , __a : Optional[int] , __a : Optional[Any] , __a : Optional[int] , __a : List[str] , __a : str ):
snake_case__ : Union[str, Any] = DebertaVaModel(config=__a )
model.to(__a )
model.eval()
snake_case__ : Any = model(__a , attention_mask=__a , token_type_ids=__a )[0]
snake_case__ : Union[str, Any] = model(__a , token_type_ids=__a )[0]
snake_case__ : List[str] = model(__a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def lowercase ( self : Tuple , __a : List[str] , __a : List[Any] , __a : int , __a : Union[str, Any] , __a : List[Any] , __a : Tuple , __a : str ):
snake_case__ : Optional[int] = DebertaVaForMaskedLM(config=__a )
model.to(__a )
model.eval()
snake_case__ : Union[str, Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowercase ( self : Tuple , __a : int , __a : str , __a : Tuple , __a : Optional[Any] , __a : Tuple , __a : Optional[int] , __a : List[str] ):
snake_case__ : Tuple = self.num_labels
snake_case__ : int = DebertaVaForSequenceClassification(__a )
model.to(__a )
model.eval()
snake_case__ : Optional[int] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(__a )
def lowercase ( self : Tuple , __a : Optional[int] , __a : int , __a : Optional[Any] , __a : Any , __a : List[str] , __a : List[Any] , __a : List[str] ):
snake_case__ : Union[str, Any] = self.num_labels
snake_case__ : Dict = DebertaVaForTokenClassification(config=__a )
model.to(__a )
model.eval()
snake_case__ : Optional[Any] = model(__a , attention_mask=__a , token_type_ids=__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowercase ( self : Dict , __a : List[str] , __a : List[str] , __a : Tuple , __a : Union[str, Any] , __a : Any , __a : Optional[Any] , __a : Any ):
snake_case__ : int = DebertaVaForQuestionAnswering(config=__a )
model.to(__a )
model.eval()
snake_case__ : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , start_positions=__a , end_positions=__a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowercase ( self : List[str] , __a : Optional[Any] , __a : Tuple , __a : List[str] , __a : Optional[Any] , __a : List[str] , __a : str , __a : str ):
snake_case__ : Any = DebertaVaForMultipleChoice(config=__a )
model.to(__a )
model.eval()
snake_case__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[Any] = model(
__a , attention_mask=__a , token_type_ids=__a , labels=__a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowercase ( self : Union[str, Any] ):
snake_case__ : int = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    token_type_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
snake_case__ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class lowercase__ (ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
__UpperCamelCase : Optional[Any] = (
(
DebertaVaModel,
DebertaVaForMaskedLM,
DebertaVaForSequenceClassification,
DebertaVaForTokenClassification,
DebertaVaForQuestionAnswering,
DebertaVaForMultipleChoice,
)
if is_torch_available()
else ()
)
__UpperCamelCase : List[str] = (
{
'feature-extraction': DebertaVaModel,
'fill-mask': DebertaVaForMaskedLM,
'question-answering': DebertaVaForQuestionAnswering,
'text-classification': DebertaVaForSequenceClassification,
'token-classification': DebertaVaForTokenClassification,
'zero-shot': DebertaVaForSequenceClassification,
}
if is_torch_available()
else {}
)
__UpperCamelCase : Any = True
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : int = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[int] = False
def lowercase ( self : Optional[Any] ):
snake_case__ : Any = DebertaVaModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__a , hidden_size=3_7 )
def lowercase ( self : List[Any] ):
self.config_tester.run_common_tests()
def lowercase ( self : Tuple ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*__a )
def lowercase ( self : Tuple ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*__a )
def lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*__a )
def lowercase ( self : List[str] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*__a )
def lowercase ( self : int ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*__a )
def lowercase ( self : Optional[int] ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_multiple_choice(*__a )
@slow
def lowercase ( self : Union[str, Any] ):
for model_name in DEBERTA_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : Optional[int] = DebertaVaModel.from_pretrained(__a )
self.assertIsNotNone(__a )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowercase__ (unittest.TestCase ):
"""simple docstring"""
@unittest.skip(reason="""Model not available yet""" )
def lowercase ( self : Optional[int] ):
pass
@slow
def lowercase ( self : int ):
snake_case__ : List[Any] = DebertaVaModel.from_pretrained("""microsoft/deberta-v2-xlarge""" )
snake_case__ : Any = torch.tensor([[0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case__ : int = model(__a , attention_mask=__a )[0]
# compare the actual values for a slice.
snake_case__ : List[str] = torch.tensor(
[[[0.2356, 0.1948, 0.0369], [-0.1063, 0.3586, -0.5152], [-0.6399, -0.0259, -0.2525]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __a , atol=1e-4 ) , f'{output[:, 1:4, 1:4]}' )
| 648 | 0 |
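A standalone sketch of the multiple-choice input expansion used in the test above: each (batch, seq_len) tensor is repeated across a new num_choices axis so the model scores every choice, after which the logits collapse to (batch, num_choices). Sizes match the tester defaults.

import torch

batch_size, num_choices, seq_len = 13, 4, 7
input_ids = torch.randint(0, 99, (batch_size, seq_len))

multiple_choice_ids = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
print(multiple_choice_ids.shape)  # torch.Size([13, 4, 7])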
"""simple docstring"""
import unittest
from transformers import is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class Image:
    '''A stand-in so Image.open can be referenced when PIL is unavailable.'''

    @staticmethod
    def open(*args, **kwargs):
        pass
@is_pipeline_test
@require_vision
class __lowercase( unittest.TestCase ):
'''simple docstring'''
@require_torch
def snake_case_ ( self ):
__lowerCamelCase : str = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , )
__lowerCamelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase : str = image_classifier(A_ , candidate_labels=['a', 'b', 'c'] )
# The floating scores are so close, we enter floating error approximation and the order is not guaranteed across
# python and torch versions.
self.assertIn(
nested_simplify(A_ ) , [
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}],
[{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'c'}, {'score': 0.333, 'label': 'b'}],
] , )
__lowerCamelCase : Optional[Any] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(A_ ) , [
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
] , )
@require_tf
def snake_case_ ( self ):
__lowerCamelCase : int = pipeline(
model='hf-internal-testing/tiny-random-clip-zero-shot-image-classification' , framework='tf' )
__lowerCamelCase : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase : Tuple = image_classifier(A_ , candidate_labels=['a', 'b', 'c'] )
self.assertEqual(
nested_simplify(A_ ) , [{'score': 0.333, 'label': 'a'}, {'score': 0.333, 'label': 'b'}, {'score': 0.333, 'label': 'c'}] , )
__lowerCamelCase : List[str] = image_classifier([image] * 5 , candidate_labels=['A', 'B', 'C'] , batch_size=2 )
self.assertEqual(
nested_simplify(A_ ) , [
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
[
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
{'score': 0.333, 'label': ANY(A_ )},
],
] , )
@slow
@require_torch
def snake_case_ ( self ):
__lowerCamelCase : Tuple = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , )
# This is an image of 2 cats with remotes and no planes
__lowerCamelCase : Tuple = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase : List[Any] = image_classifier(A_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(A_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
__lowerCamelCase : Dict = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(A_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
@slow
@require_tf
def snake_case_ ( self ):
__lowerCamelCase : int = pipeline(
task='zero-shot-image-classification' , model='openai/clip-vit-base-patch32' , framework='tf' )
# This is an image of 2 cats with remotes and no planes
__lowerCamelCase : int = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__lowerCamelCase : Union[str, Any] = image_classifier(A_ , candidate_labels=['cat', 'plane', 'remote'] )
self.assertEqual(
nested_simplify(A_ ) , [
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
] , )
__lowerCamelCase : Dict = image_classifier([image] * 5 , candidate_labels=['cat', 'plane', 'remote'] , batch_size=2 )
self.assertEqual(
nested_simplify(A_ ) , [
[
{'score': 0.511, 'label': 'remote'},
{'score': 0.485, 'label': 'cat'},
{'score': 0.004, 'label': 'plane'},
],
]
* 5 , )
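# A minimal usage sketch of the zero-shot image classification pipeline that the
# tests above exercise; the checkpoint and image path come from the slow test,
# everything else is illustrative.
from PIL import Image
from transformers import pipeline

classifier = pipeline(
    task="zero-shot-image-classification",
    model="openai/clip-vit-base-patch32",
)
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
# Returns a list of {"score": ..., "label": ...} dicts, sorted by score.
print(classifier(image, candidate_labels=["cat", "plane", "remote"]))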
| 700 |
"""simple docstring"""
import pytest
import datasets
# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]
def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)
def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")
@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))
@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()
@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)
@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
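# A hypothetical companion test module showing the conftest behavior above:
# unmarked tests are collected with the "unit" marker, while explicitly marked
# tests keep their own marker. Names are illustrative only.
import pytest

def test_addition_is_unit_by_default():
    # pytest_collection_modifyitems adds pytest.mark.unit to this test
    assert 1 + 1 == 2

@pytest.mark.integration
def test_already_marked_integration():
    # already marked, so the collection hook leaves it untouched
    assert True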
| 263 | 0 |
from statistics import mean, stdev
def normalization( data , ndigits = 3 ):
    x_min = min(data )
    x_max = max(data )
    # rescale each point into the [0, 1] range
    return [round((x - x_min) / (x_max - x_min) , ndigits ) for x in data]
def standardization( data , ndigits = 3 ):
    mu = mean(data )
    sigma = stdev(data )
    # rescale data to zero mean and unit (sample) variance
    return [round((x - mu) / sigma , ndigits ) for x in data]
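# Hand-checked examples for the two helpers above (using the corrected names):
# min-max normalization maps the extremes to 0 and 1; standardization yields
# zero mean and unit sample variance.
data = [2, 4, 6, 8, 10]
print(normalization(data))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization(data))  # [-1.265, -0.632, 0.0, 0.632, 1.265]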
| 276 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class _SCREAMING_SNAKE_CASE ( unittest.TestCase ):
def __init__( self , lowerCamelCase , lowerCamelCase=7 , lowerCamelCase=3 , lowerCamelCase=30 , lowerCamelCase=4_00 , lowerCamelCase=True , lowerCamelCase=None , lowerCamelCase=True , lowerCamelCase=1 / 2_55 , lowerCamelCase=True , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=[0.5, 0.5, 0.5] , lowerCamelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ = size if size is not None else {"shortest_edge": 18, "longest_edge": 13_33}
snake_case__ = parent
snake_case__ = batch_size
snake_case__ = num_channels
snake_case__ = min_resolution
snake_case__ = max_resolution
snake_case__ = do_resize
snake_case__ = size
snake_case__ = do_rescale
snake_case__ = rescale_factor
snake_case__ = do_normalize
snake_case__ = image_mean
snake_case__ = image_std
snake_case__ = do_pad
def A_ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def A_ ( self , lowerCamelCase , lowerCamelCase=False ):
if not batched:
snake_case__ = image_inputs[0]
if isinstance(lowerCamelCase , Image.Image ):
snake_case__ , snake_case__ = image.size
else:
snake_case__ , snake_case__ = image.shape[1], image.shape[2]
if w < h:
snake_case__ = int(self.size["shortest_edge"] * h / w )
snake_case__ = self.size["shortest_edge"]
elif w > h:
snake_case__ = self.size["shortest_edge"]
snake_case__ = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ = self.size["shortest_edge"]
snake_case__ = self.size["shortest_edge"]
else:
snake_case__ = []
for image in image_inputs:
snake_case__ , snake_case__ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[0] )[0]
snake_case__ = max(lowerCamelCase , key=lambda lowerCamelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class _SCREAMING_SNAKE_CASE ( __UpperCamelCase , unittest.TestCase ):
_A : Optional[Any] = DetrImageProcessor if is_vision_available() else None
def A_ ( self ):
snake_case__ = DetrImageProcessingTester(self )
@property
def A_ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self ):
snake_case__ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCamelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_rescale" ) )
self.assertTrue(hasattr(lowerCamelCase , "rescale_factor" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCamelCase , "size" ) )
self.assertTrue(hasattr(lowerCamelCase , "do_pad" ) )
def A_ ( self ):
snake_case__ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 13_33} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
snake_case__ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCamelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCamelCase )
def A_ ( self ):
pass
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , Image.Image )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , np.ndarray )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A_ ( self ):
# Initialize image_processing
snake_case__ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCamelCase , torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase , torch.Tensor )
# Test not batched input
snake_case__ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ = image_processing(lowerCamelCase , return_tensors="pt" ).pixel_values
snake_case__ , snake_case__ = self.image_processor_tester.get_expected_values(lowerCamelCase , batched=lowerCamelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A_ ( self ):
# prepare image and target
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"image_id": 3_97_69, "annotations": target}
# encode them
snake_case__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50" )
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
@slow
def A_ ( self ):
# prepare image, target and masks_path
snake_case__ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ = json.loads(f.read() )
snake_case__ = {"file_name": "000000039769.png", "image_id": 3_97_69, "segments_info": target}
snake_case__ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ = DetrImageProcessor.from_pretrained("facebook/detr-resnet-50-panoptic" )
snake_case__ = image_processing(images=lowerCamelCase , annotations=lowerCamelCase , masks_path=lowerCamelCase , return_tensors="pt" )
# verify pixel values
snake_case__ = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding["pixel_values"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCamelCase , atol=1e-4 ) )
# verify area
snake_case__ = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCamelCase ) )
# verify boxes
snake_case__ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCamelCase )
snake_case__ = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCamelCase , atol=1e-3 ) )
# verify image_id
snake_case__ = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCamelCase ) )
# verify is_crowd
snake_case__ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCamelCase ) )
# verify class_labels
snake_case__ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCamelCase ) )
# verify masks
snake_case__ = 82_28_73
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCamelCase )
# verify orig_size
snake_case__ = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCamelCase ) )
# verify size
snake_case__ = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCamelCase ) )
| 276 | 1 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 718 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    """Convert an image tensor with values in [0, 1] to a bit tensor with values in {-1, 1}."""
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b c h w -> b c 1 h w')
    bit_repr = ((x & mask) != 0).float()
    bit_repr = rearrange(bit_repr, 'b c d h w -> b (c d) h w')
    bit_repr = bit_repr * 2 - 1
    return bit_repr
def bits_to_decimal(x, bits=BITS):
    """Convert a bit tensor with values in {-1, 1} back to an image tensor with values in [0, 1]."""
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b (c d) h w -> b c d h w', d=bits)
    dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
    return (dec / 255).clamp(0.0, 1.0)
def ddim_bit_scheduler_step(self, model_output, timestep, sample, eta=0.0, use_clipped_model_output=True, generator=None, return_dict=True) -> Union[DDIMSchedulerOutput, Tuple]:
    """Modified DDIM step that clips the predicted original sample to the bit scale."""
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler" )
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample , -scale , scale )
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep , prev_timestep )
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output ) else "cpu"
        noise = torch.randn(model_output.shape , dtype=model_output.dtype , generator=generator ).to(device )
        variance = self._get_variance(timestep , prev_timestep ) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample , pred_original_sample=pred_original_sample )
def a_ ( self , _A , _A , _A , _A="epsilon" , _A=None , _A = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
"""simple docstring"""
snake_case__ = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
snake_case__ , snake_case__ = torch.split(_A , sample.shape[1] , dim=1 )
else:
snake_case__ = None
# 1. compute alphas, betas
snake_case__ = self.alphas_cumprod[t]
snake_case__ = self.alphas_cumprod[t - 1] if t > 0 else self.one
snake_case__ = 1 - alpha_prod_t
snake_case__ = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
snake_case__ = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
snake_case__ = model_output
else:
raise ValueError(f'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
snake_case__ = self.bit_scale
if self.config.clip_sample:
snake_case__ = torch.clamp(_A , -scale , _A )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
snake_case__ = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
snake_case__ = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
snake_case__ = 0
if t > 0:
snake_case__ = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=_A ).to(model_output.device )
snake_case__ = (self._get_variance(_A , predicted_variance=_A ) ** 0.5) * noise
snake_case__ = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=_A , pred_original_sample=_A )
class __SCREAMING_SNAKE_CASE( DiffusionPipeline ):
    def __init__( self , unet: UNet2DConditionModel , scheduler: Union[DDIMScheduler, DDPMScheduler] , bit_scale: Optional[float] = 1.0 ):
        super().__init__()
        self.bit_scale = bit_scale
        # route the scheduler through the bit-aware step functions defined above
        self.scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler , DDIMScheduler ) else ddpm_bit_scheduler_step
        )
        self.register_modules(unet=unet , scheduler=scheduler )
    @torch.no_grad()
    def __call__( self , height: Optional[int] = 2_56 , width: Optional[int] = 2_56 , num_inference_steps: Optional[int] = 50 , generator: Optional[torch.Generator] = None , batch_size: Optional[int] = 1 , output_type: Optional[str] = "pil" , return_dict: bool = True , **kwargs ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width) , generator=generator , )
        latents = decimal_to_bits(latents ) * self.bit_scale
        latents = latents.to(self.device )
        self.scheduler.set_timesteps(num_inference_steps )
        for t in self.progress_bar(self.scheduler.timesteps ):
            # predict the noise residual
            model_output = self.unet(latents , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(model_output , t , latents ).prev_sample
        image = bits_to_decimal(latents )
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
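# A standalone round-trip check for the bit conversion helpers above: an image
# tensor in [0, 1] maps to {-1, 1} bit planes and back, exact up to the 1/255
# quantization the encoding performs.
import torch

x = torch.rand(1, 3, 8, 8)
bit_repr = decimal_to_bits(x)          # shape (1, 24, 8, 8), values in {-1.0, 1.0}
recovered = bits_to_decimal(bit_repr)  # back in [0, 1]
assert torch.allclose(recovered, (x * 255).int().float() / 255)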
| 372 | 0 |
"""simple docstring"""
from maths.prime_factors import prime_factors
def mobius (number ) -> int:
    """simple docstring"""
    if not isinstance(number , int ):
        msg = f'Input value of [number={number}] must be an integer'
        raise TypeError(msg )
    if number < 1:
        raise ValueError('''Input must be a positive integer''' )
    return -1 if len(prime_factors(number ) ) % 2 else 1
if __name__ == "__main__":
import doctest
doctest.testmod()
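# Hand-worked examples, assuming maths.prime_factors returns the factorization
# with multiplicity (e.g. prime_factors(24) == [2, 2, 2, 3]):
# mobius(30) -> [2, 3, 5], odd count of factors   -> -1
# mobius(24) -> [2, 2, 2, 3], even count of factors -> 1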
| 512 |
"""simple docstring"""
# limitations under the License.
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from .pipelines import DiffusionPipeline, ImagePipelineOutput # noqa: F401
from .utils import deprecate
deprecate(
"""pipelines_utils""",
"""0.22.0""",
"""Importing `DiffusionPipeline` or `ImagePipelineOutput` from diffusers.pipeline_utils is deprecated. Please import from diffusers.pipelines.pipeline_utils instead.""",
standard_warn=False,
stacklevel=3,
)
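# A sketch of what a caller of the deprecated path sees: on the first import of
# the module, diffusers' deprecate() emits a FutureWarning with the message
# above, but the import itself still succeeds.
import warnings

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    from diffusers.pipeline_utils import DiffusionPipeline  # deprecated alias
print([str(w.message) for w in caught])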
| 512 | 1 |
'''simple docstring'''
from scipy.stats import spearmanr
import datasets
lowercase__ = '\nThe Spearman rank-order correlation coefficient is a measure of the\nrelationship between two datasets. Like other correlation coefficients,\nthis one varies between -1 and +1 with 0 implying no correlation.\nPositive correlations imply that as data in dataset x increases, so\ndoes data in dataset y. Negative correlations imply that as x increases,\ny decreases. Correlations of -1 or +1 imply an exact monotonic relationship.\n\nUnlike the Pearson correlation, the Spearman correlation does not\nassume that both datasets are normally distributed.\n\nThe p-value roughly indicates the probability of an uncorrelated system\nproducing datasets that have a Spearman correlation at least as extreme\nas the one computed from these datasets. The p-values are not entirely\nreliable but are probably reasonable for datasets larger than 500 or so.\n'
lowercase__ = '\nArgs:\n predictions (`List[float]`): Predicted labels, as returned by a model.\n references (`List[float]`): Ground truth labels.\n return_pvalue (`bool`): If `True`, returns the p-value. If `False`, returns\n only the spearmanr score. Defaults to `False`.\nReturns:\n spearmanr (`float`): Spearman correlation coefficient.\n p-value (`float`): p-value. **Note**: is only returned if `return_pvalue=True` is input.\nExamples:\n Example 1:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5], predictions=[10, 9, 2.5, 6, 4])\n >>> print(results)\n {\'spearmanr\': -0.7}\n\n Example 2:\n >>> spearmanr_metric = datasets.load_metric("spearmanr")\n >>> results = spearmanr_metric.compute(references=[1, 2, 3, 4, 5],\n ... predictions=[10, 9, 2.5, 6, 4],\n ... return_pvalue=True)\n >>> print(results[\'spearmanr\'])\n -0.7\n >>> print(round(results[\'spearmanr_pvalue\'], 2))\n 0.19\n'
lowercase__ = R'\\n@book{kokoska2000crc,\n title={CRC standard probability and statistics tables and formulae},\n author={Kokoska, Stephen and Zwillinger, Daniel},\n year={2000},\n publisher={Crc Press}\n}\n@article{2020SciPy-NMeth,\n author = {Virtanen, Pauli and Gommers, Ralf and Oliphant, Travis E. and\n Haberland, Matt and Reddy, Tyler and Cournapeau, David and\n Burovski, Evgeni and Peterson, Pearu and Weckesser, Warren and\n Bright, Jonathan and {van der Walt}, St{\'e}fan J. and\n Brett, Matthew and Wilson, Joshua and Millman, K. Jarrod and\n Mayorov, Nikolay and Nelson, Andrew R. J. and Jones, Eric and\n Kern, Robert and Larson, Eric and Carey, C J and\n Polat, {\.I}lhan and Feng, Yu and Moore, Eric W. and\n {VanderPlas}, Jake and Laxalde, Denis and Perktold, Josef and\n Cimrman, Robert and Henriksen, Ian and Quintero, E. A. and\n Harris, Charles R. and Archibald, Anne M. and\n Ribeiro, Ant{\^o}nio H. and Pedregosa, Fabian and\n {van Mulbregt}, Paul and {SciPy 1.0 Contributors}},\n title = {{{SciPy} 1.0: Fundamental Algorithms for Scientific\n Computing in Python}},\n journal = {Nature Methods},\n year = {2020},\n volume = {17},\n pages = {261--272},\n adsurl = {https://rdcu.be/b08Wh},\n doi = {10.1038/s41592-019-0686-2},\n}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class A_ ( datasets.Metric ):
'''simple docstring'''
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('float' ),
'references': datasets.Value('float' ),
} ) , reference_urls=['https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.spearmanr.html'] , )
    def _compute( self , predictions , references , return_pvalue=False ):
        results = spearmanr(references , predictions )
        if return_pvalue:
            return {"spearmanr": results[0], "spearmanr_pvalue": results[1]}
        else:
            return {"spearmanr": results[0]}
| 709 |
'''simple docstring'''
def solution( n = 10_00 ):
    prev_numerator , prev_denominator = 1, 1
    result = []
    for i in range(1 , n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(f'''{solution() = }''')
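# Worked check: the expansions of sqrt(2) begin 3/2, 7/5, 17/12, 41/29, 99/70,
# 239/169, 577/408, 1393/985; only the eighth has more digits in the numerator
# than in the denominator, so solution(8) == 1.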
| 695 | 0 |
__all__ = [
'DownloadConfig',
'DownloadManager',
'DownloadMode',
'StreamingDownloadManager',
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
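# A hypothetical usage sketch of the exported API, from client code rather than
# inside this package; the URL is illustrative only.
from datasets.download import DownloadConfig, DownloadManager

dl_manager = DownloadManager(download_config=DownloadConfig())
# download_and_extract caches the download and returns the extracted local path
local_path = dl_manager.download_and_extract("https://example.com/data.zip")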
| 371 |
def solution ( n = 100 ) -> int:
    '''simple docstring'''
    sum_of_squares = n * (n + 1) * (2 * n + 1) / 6
    square_of_sum = (n * (n + 1) / 2) ** 2
    return int(square_of_sum - sum_of_squares )
if __name__ == "__main__":
print(f'''{solution() = }''')
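# Sanity check with the classic n = 10 case: the square of the sum is
# 55 ** 2 = 3025, the sum of squares is 385, and the difference is 2640,
# so solution(10) == 2640.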
| 371 | 1 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
__lowercase : int = "bart"
__lowercase : Dict = True
@st.cache(allow_output_mutation=snake_case)
def SCREAMING_SNAKE_CASE ( ):
if LOAD_DENSE_INDEX:
__snake_case = AutoTokenizer.from_pretrained('''yjernite/retribert-base-uncased''')
__snake_case = AutoModel.from_pretrained('''yjernite/retribert-base-uncased''').to('''cuda:0''')
__snake_case = qar_model.eval()
else:
__snake_case , __snake_case = (None, None)
if MODEL_TYPE == "bart":
__snake_case = AutoTokenizer.from_pretrained('''yjernite/bart_eli5''')
__snake_case = AutoModelForSeqaSeqLM.from_pretrained('''yjernite/bart_eli5''').to('''cuda:0''')
__snake_case = torch.load('''seq2seq_models/eli5_bart_model_blm_2.pth''')
sas_model.load_state_dict(save_dict['''model'''])
__snake_case = sas_model.eval()
else:
__snake_case , __snake_case = make_qa_sas_model(
model_name='''t5-small''', from_file='''seq2seq_models/eli5_t5_model_1024_4.pth''', device='''cuda:0''')
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=snake_case)
def SCREAMING_SNAKE_CASE ( ):
if LOAD_DENSE_INDEX:
__snake_case = faiss.StandardGpuResources()
__snake_case = datasets.load_dataset(path='''wiki_snippets''', name='''wiki40b_en_100_0''')['''train''']
__snake_case = np.memmap(
'''wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat''', dtype='''float32''', mode='''r''', shape=(wikiaab_passages.num_rows, 1_28), )
__snake_case = faiss.IndexFlatIP(1_28)
__snake_case = faiss.index_cpu_to_gpu(snake_case, 1, snake_case)
wikiaab_gpu_index_flat.add(snake_case) # TODO fix for larger GPU
else:
__snake_case , __snake_case = (None, None)
__snake_case = Elasticsearch([{'''host''': '''localhost''', '''port''': '''9200'''}])
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=snake_case)
def SCREAMING_SNAKE_CASE ( ):
__snake_case = datasets.load_dataset('''eli5''', name='''LFQA_reddit''')
__snake_case = elia['''train_eli5''']
__snake_case = np.memmap(
'''eli5_questions_reps.dat''', dtype='''float32''', mode='''r''', shape=(elia_train.num_rows, 1_28))
__snake_case = faiss.IndexFlatIP(1_28)
eli5_train_q_index.add(snake_case)
return (elia_train, eli5_train_q_index)
__lowercase ,__lowercase ,__lowercase : Any = load_indexes()
__lowercase ,__lowercase ,__lowercase ,__lowercase : int = load_models()
__lowercase ,__lowercase : Union[str, Any] = load_train_data()
def SCREAMING_SNAKE_CASE ( snake_case, snake_case=10):
__snake_case = embed_questions_for_retrieval([question], snake_case, snake_case)
__snake_case , __snake_case = eli5_train_q_index.search(snake_case, snake_case)
__snake_case = [elia_train[int(snake_case)] for i in I[0]]
return nn_examples
def SCREAMING_SNAKE_CASE ( snake_case, snake_case="wiki40b", snake_case="dense", snake_case=10):
if source == "none":
__snake_case , __snake_case = (''' <P> '''.join(['''''' for _ in range(11)]).strip(), [])
else:
if method == "dense":
__snake_case , __snake_case = query_qa_dense_index(
snake_case, snake_case, snake_case, snake_case, snake_case, snake_case)
else:
__snake_case , __snake_case = query_es_index(
snake_case, snake_case, index_name='''english_wiki40b_snippets_100w''', n_results=snake_case, )
__snake_case = [
(res['''article_title'''], res['''section_title'''].strip(), res['''score'''], res['''passage_text''']) for res in hit_lst
]
__snake_case = '''question: {} context: {}'''.format(snake_case, snake_case)
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda snake_case: None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda snake_case: None),
})
def SCREAMING_SNAKE_CASE ( snake_case, snake_case, snake_case, snake_case=64, snake_case=2_56, snake_case=False, snake_case=2, snake_case=0.95, snake_case=0.8):
with torch.no_grad():
__snake_case = qa_sas_generate(
snake_case, snake_case, snake_case, num_answers=1, num_beams=snake_case, min_len=snake_case, max_len=snake_case, do_sample=snake_case, temp=snake_case, top_p=snake_case, top_k=snake_case, max_input_length=10_24, device='''cuda:0''', )[0]
return (answer, support_list)
st.title("Long Form Question Answering with ELI5")
# Start sidebar
__lowercase : Dict = "<img src='https://huggingface.co/front/assets/huggingface_logo.svg'>"
__lowercase : Any = "\n<html>\n <head>\n <style>\n .img-container {\n padding-left: 90px;\n padding-right: 90px;\n padding-top: 50px;\n padding-bottom: 50px;\n background-color: #f0f3f9;\n }\n </style>\n </head>\n <body>\n <span class=\"img-container\"> <!-- Inline parent element -->\n %s\n </span>\n </body>\n</html>\n" % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
__lowercase : Optional[int] = "\nThis demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).\nFirst, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,\na pre-processed fixed snapshot of Wikipedia.\n"
st.sidebar.markdown(description, unsafe_allow_html=True)
__lowercase : List[str] = [
"Answer the question",
"View the retrieved document only",
"View the most similar ELI5 question and answer",
"Show me everything, please!",
]
__lowercase : Dict = st.sidebar.checkbox("Demo options")
if demo_options:
__lowercase : Union[str, Any] = st.sidebar.selectbox(
"",
action_list,
index=3,
)
__lowercase : Union[str, Any] = action_list.index(action_st)
__lowercase : str = st.sidebar.selectbox(
"",
["Show full text of passages", "Show passage section titles"],
index=0,
)
__lowercase : Optional[Any] = show_type == "Show full text of passages"
else:
__lowercase : Optional[Any] = 3
__lowercase : List[Any] = True
__lowercase : int = st.sidebar.checkbox("Retrieval options")
if retrieval_options:
__lowercase : Tuple = "\n ### Information retriever options\n\n The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding\n trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.\n The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.\n "
st.sidebar.markdown(retriever_info)
__lowercase : List[Any] = st.sidebar.selectbox("Which Wikipedia format should the model use?", ["wiki40b", "none"])
__lowercase : Any = st.sidebar.selectbox("Which Wikipedia indexer should the model use?", ["dense", "sparse", "mixed"])
else:
__lowercase : Tuple = "wiki40b"
__lowercase : Optional[int] = "dense"
__lowercase : str = "beam"
__lowercase : str = 2
__lowercase : int = 64
__lowercase : Optional[int] = 256
__lowercase : Dict = None
__lowercase : str = None
__lowercase : int = st.sidebar.checkbox("Generation options")
if generate_options:
__lowercase : Tuple = "\n ### Answer generation options\n\n The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)\n weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with\n **beam** search, or **sample** from the decoder's output probabilities.\n "
st.sidebar.markdown(generate_info)
__lowercase : Optional[int] = st.sidebar.selectbox("Would you like to use beam search or sample an answer?", ["beam", "sampled"])
__lowercase : Optional[int] = st.sidebar.slider(
"Minimum generation length", min_value=8, max_value=256, value=64, step=8, format=None, key=None
)
__lowercase : Any = st.sidebar.slider(
"Maximum generation length", min_value=64, max_value=512, value=256, step=16, format=None, key=None
)
if sampled == "beam":
__lowercase : List[Any] = st.sidebar.slider("Beam size", min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
__lowercase : Tuple = st.sidebar.slider(
"Nucleus sampling p", min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
__lowercase : Any = st.sidebar.slider(
"Temperature", min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
__lowercase : Union[str, Any] = None
# start main text
__lowercase : List[str] = [
"<MY QUESTION>",
"How do people make chocolate?",
"Why do we get a fever when we are sick?",
"How can different animals perceive different colors?",
"What is natural language processing?",
"What's the best way to treat a sunburn?",
"What exactly are vitamins ?",
"How does nuclear energy provide electricity?",
"What's the difference between viruses and bacteria?",
"Why are flutes classified as woodwinds when most of them are made out of metal ?",
"Why do people like drinking coffee even though it tastes so bad?",
"What happens when wine ages? How does it make the wine taste better?",
"If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?",
"How can we set a date to the beginning or end of an artistic period? Doesn't the change happen gradually?",
"How does New Zealand have so many large bird predators?",
]
__lowercase : Dict = st.selectbox(
"What would you like to ask? ---- select <MY QUESTION> to enter a new query",
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
__lowercase : Optional[int] = st.text_input("Enter your question here:", "")
else:
__lowercase : Tuple = question_s
if st.button("Show me!"):
if action in [0, 1, 3]:
if index_type == "mixed":
__lowercase ,__lowercase : List[Any] = make_support(question, source=wiki_source, method="dense", n_results=10)
__lowercase ,__lowercase : Any = make_support(question, source=wiki_source, method="sparse", n_results=10)
__lowercase : List[Any] = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
__lowercase : Dict = support_list[:10]
__lowercase : Any = "<P> " + " <P> ".join([res[-1] for res in support_list])
else:
__lowercase ,__lowercase : Optional[Any] = make_support(question, source=wiki_source, method=index_type, n_results=10)
if action in [0, 3]:
__lowercase ,__lowercase : Optional[int] = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == "sampled"),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown("### The model generated answer is:")
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown("--- \n ### The model is drawing information from the following Wikipedia passages:")
for i, res in enumerate(support_list):
__lowercase : str = "https://en.wikipedia.org/wiki/{}".format(res[0].replace(" ", "_"))
__lowercase : List[str] = res[1].strip()
if sec_titles == "":
__lowercase : Any = "[{}]({})".format(res[0], wiki_url)
else:
__lowercase : Optional[int] = sec_titles.split(" & ")
__lowercase : List[Any] = " & ".join(
["[{}]({}#{})".format(sec.strip(), wiki_url, sec.strip().replace(" ", "_")) for sec in sec_list]
)
st.markdown(
"{0:02d} - **Article**: {1:<18} <br> _Section_: {2}".format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
"> <span style=\"font-family:arial; font-size:10pt;\">" + res[-1] + "</span>", unsafe_allow_html=True
)
if action in [2, 3]:
__lowercase : List[str] = find_nearest_training(question)
__lowercase : Dict = nn_train_list[0]
st.markdown(
"--- \n ### The most similar question in the ELI5 training set was: \n\n {}".format(train_exple["title"])
)
__lowercase : Optional[int] = [
"{}. {}".format(i + 1, " \n".join([line.strip() for line in ans.split("\n") if line.strip() != ""]))
for i, (ans, sc) in enumerate(zip(train_exple["answers"]["text"], train_exple["answers"]["score"]))
if i == 0 or sc > 2
]
st.markdown("##### Its answers were: \n\n {}".format("\n".join(answers_st)))
__lowercase : Optional[int] = "\n---\n\n**Disclaimer**\n\n*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.\nEvaluating biases of such a model and ensuring factual generations are still very much open research problems.\nTherefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*\n"
st.sidebar.markdown(disclaimer, unsafe_allow_html=True)
| 93 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _A ( unittest.TestCase ):
"""simple docstring"""
def lowercase ( self : Optional[int] ) -> List[str]:
__snake_case = tempfile.mkdtemp()
# fmt: off
__snake_case = ['''l''', '''o''', '''w''', '''e''', '''r''', '''s''', '''t''', '''i''', '''d''', '''n''', '''lo''', '''l</w>''', '''w</w>''', '''r</w>''', '''t</w>''', '''low</w>''', '''er</w>''', '''lowest</w>''', '''newer</w>''', '''wider''', '''<unk>''', '''<|startoftext|>''', '''<|endoftext|>''']
# fmt: on
__snake_case = dict(zip(A_ , range(len(A_ ) ) ) )
__snake_case = ['''#version: 0.2''', '''l o''', '''lo w</w>''', '''e r</w>''', '''''']
__snake_case = {'''unk_token''': '''<unk>'''}
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
__snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(A_ ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(A_ ) )
__snake_case = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'''image_std''': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
__snake_case = os.path.join(self.tmpdirname , A_ )
with open(self.image_processor_file , '''w''' , encoding='''utf-8''' ) as fp:
json.dump(A_ , A_ )
def lowercase ( self : Optional[Any] , **A_ : Dict ) -> Any:
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Optional[int] , **A_ : str ) -> str:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Any , **A_ : Tuple ) -> Tuple:
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A_ )
def lowercase ( self : Optional[int] ) -> Optional[Any]:
shutil.rmtree(self.tmpdirname )
def lowercase ( self : int ) -> Optional[Any]:
__snake_case = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
__snake_case = [Image.fromarray(np.moveaxis(A_ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def lowercase ( self : Optional[Any] ) -> Optional[Any]:
__snake_case = self.get_tokenizer()
__snake_case = self.get_rust_tokenizer()
__snake_case = self.get_image_processor()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_slow.save_pretrained(self.tmpdirname )
__snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A_ )
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
processor_fast.save_pretrained(self.tmpdirname )
__snake_case = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A_ )
self.assertIsInstance(processor_fast.tokenizer , A_ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A_ )
self.assertIsInstance(processor_fast.image_processor , A_ )
def lowercase ( self : Union[str, Any] ) -> Any:
__snake_case = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__snake_case = self.get_tokenizer(bos_token='''(BOS)''' , eos_token='''(EOS)''' )
__snake_case = self.get_image_processor(do_normalize=A_ , padding_value=1.0 )
__snake_case = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token='''(BOS)''' , eos_token='''(EOS)''' , do_normalize=A_ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A_ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A_ )
def lowercase ( self : Any ) -> str:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = self.prepare_image_inputs()
__snake_case = image_processor(A_ , return_tensors='''np''' )
__snake_case = processor(images=A_ , return_tensors='''np''' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowercase ( self : List[str] ) -> List[Any]:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = '''lower newer'''
__snake_case = processor(text=A_ )
__snake_case = tokenizer(A_ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowercase ( self : List[Any] ) -> str:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = '''lower newer'''
__snake_case = self.prepare_image_inputs()
__snake_case = processor(text=A_ , images=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''input_ids''', '''attention_mask''', '''pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
def lowercase ( self : Union[str, Any] ) -> Any:
__snake_case = self.get_image_processor()
__snake_case = self.get_tokenizer()
__snake_case = CLIPSegProcessor(tokenizer=A_ , image_processor=A_ )
__snake_case = self.prepare_image_inputs()
__snake_case = self.prepare_image_inputs()
__snake_case = processor(images=A_ , visual_prompt=A_ )
self.assertListEqual(list(inputs.keys() ) , ['''pixel_values''', '''conditional_pixel_values'''] )
# test if it raises when no input is passed
with pytest.raises(A_ ):
processor()
    def lowercase ( self : Optional[int] ) -> Dict:
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer , image_processor=image_processor )
        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
        decoded_processor = processor.batch_decode(predicted_ids )
        decoded_tok = tokenizer.batch_decode(predicted_ids )
        self.assertListEqual(decoded_tok , decoded_processor )
| 93 | 1 |
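# A minimal sketch of the processor under test, from client code; the checkpoint
# name is an assumption (any CLIPSeg repo with a saved processor would do).
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(text=["a cat", "a remote"], images=[image, image], return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']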