'''simple docstring'''
from diffusers.utils.testing_utils import require_onnxruntime


@require_onnxruntime
class UpperCamelCase__:
    """simple docstring"""

    pass
'''simple docstring'''
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(
    img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    # Build the 2x3 affine matrix mapping the three points in pt1 onto pt2, then apply it.
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
        plt.title(titles[i])
        plt.axis('off')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
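# A minimal sketch (not part of the script above) of what cv2.getAffineTransform
# computes: a 2x3 matrix M such that M @ [x, y, 1]^T maps each of the three
# source points onto its destination point. The point values are illustrative.
import cv2
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
M = cv2.getAffineTransform(src, dst)  # shape (2, 3)

# Append a homogeneous coordinate and check that every source point lands on
# its destination (up to floating-point error).
ones = np.ones((3, 1), np.float32)
mapped = (M @ np.hstack([src, ones]).T).T
assert np.allclose(mapped, dst, atol=1e-4)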
'''simple docstring'''
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .notes_encoder import SpectrogramNotesEncoder
    from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import (
        SpectrogramContEncoder,
        SpectrogramDiffusionPipeline,
        T5FilmDecoder,
    )

try:
    if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_transformers_and_torch_and_note_seq_objects import *  # noqa F403
else:
    from .midi_utils import MidiProcessor
'''simple docstring'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    'configuration_bigbird_pegasus': [
        'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
        'BigBirdPegasusConfig',
        'BigBirdPegasusOnnxConfig',
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_bigbird_pegasus'] = [
        'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
        'BigBirdPegasusForCausalLM',
        'BigBirdPegasusForConditionalGeneration',
        'BigBirdPegasusForQuestionAnswering',
        'BigBirdPegasusForSequenceClassification',
        'BigBirdPegasusModel',
        'BigBirdPegasusPreTrainedModel',
    ]

if TYPE_CHECKING:
    from .configuration_bigbird_pegasus import (
        BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
        BigBirdPegasusConfig,
        BigBirdPegasusOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_bigbird_pegasus import (
            BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
            BigBirdPegasusForCausalLM,
            BigBirdPegasusForConditionalGeneration,
            BigBirdPegasusForQuestionAnswering,
            BigBirdPegasusForSequenceClassification,
            BigBirdPegasusModel,
            BigBirdPegasusPreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
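# A minimal, self-contained sketch (not from the file above) of the lazy-import
# idea behind _LazyModule: a module object whose __getattr__ imports the real
# submodule only on first attribute access, keeping top-level imports cheap.
import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])  # imported only now
        return getattr(module, attr)


# Usage: LazyModule('demo', {'json': ['dumps']}).dumps({'a': 1}) imports json lazily.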
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
    from transformers.trainer import DEFAULT_CALLBACKS

    from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel


class MyTestTrainerCallback(TrainerCallback):
    """A callback that registers the events that go through."""

    def __init__(self):
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append('on_init_end')

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append('on_train_begin')

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append('on_train_end')

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append('on_epoch_begin')

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append('on_epoch_end')

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append('on_step_begin')

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append('on_step_end')

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append('on_evaluate')

    def on_predict(self, args, state, control, **kwargs):
        self.events.append('on_predict')

    def on_save(self, args, state, control, **kwargs):
        self.events.append('on_save')

    def on_log(self, args, state, control, **kwargs):
        self.events.append('on_log')

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append('on_prediction_step')


@require_torch
class TrainerCallbackTest(unittest.TestCase):
    def setUp(self):
        self.output_dir = tempfile.mkdtemp()

    def tearDown(self):
        shutil.rmtree(self.output_dir)

    def get_trainer(self, a=0, b=0, train_len=64, eval_len=64, callbacks=None, disable_tqdm=False, **kwargs):
        train_dataset = RegressionDataset(length=train_len)
        eval_dataset = RegressionDataset(length=eval_len)
        config = RegressionModelConfig(a=a, b=b)
        model = RegressionPreTrainedModel(config)

        args = TrainingArguments(self.output_dir, disable_tqdm=disable_tqdm, report_to=[], **kwargs)
        return Trainer(
            model, args, train_dataset=train_dataset, eval_dataset=eval_dataset, callbacks=callbacks,
        )

    def check_callbacks_equality(self, cbs1, cbs2):
        self.assertEqual(len(cbs1), len(cbs2))

        # Order doesn't matter
        cbs1 = sorted(cbs1, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)
        cbs2 = sorted(cbs2, key=lambda cb: cb.__name__ if isinstance(cb, type) else cb.__class__.__name__)

        for cb1, cb2 in zip(cbs1, cbs2):
            if isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1, cb2)
            elif isinstance(cb1, type) and not isinstance(cb2, type):
                self.assertEqual(cb1, cb2.__class__)
            elif not isinstance(cb1, type) and isinstance(cb2, type):
                self.assertEqual(cb1.__class__, cb2)
            else:
                self.assertEqual(cb1, cb2)

    def get_expected_events(self, trainer):
        expected_events = ['on_init_end', 'on_train_begin']
        step = 0
        train_dl_len = len(trainer.get_eval_dataloader())
        evaluation_events = ['on_prediction_step'] * len(trainer.get_eval_dataloader()) + ['on_log', 'on_evaluate']
        for _ in range(trainer.state.num_train_epochs):
            expected_events.append('on_epoch_begin')
            for _ in range(train_dl_len):
                step += 1
                expected_events += ['on_step_begin', 'on_step_end']
                if step % trainer.args.logging_steps == 0:
                    expected_events.append('on_log')
                if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
                    expected_events += evaluation_events.copy()
                if step % trainer.args.save_steps == 0:
                    expected_events.append('on_save')
            expected_events.append('on_epoch_end')
            if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
                expected_events += evaluation_events.copy()
        expected_events += ['on_log', 'on_train_end']
        return expected_events

    def test_init_callback(self):
        trainer = self.get_trainer()
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # Callbacks passed at init are added to the default callbacks
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        expected_callbacks.append(MyTestTrainerCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
        trainer = self.get_trainer(disable_tqdm=True)
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_add_remove_callback(self):
        expected_callbacks = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
        trainer = self.get_trainer()

        # We can add, pop, or remove by class name
        trainer.remove_callback(DefaultFlowCallback)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb = trainer.pop_callback(DefaultFlowCallback)
        self.assertEqual(cb.__class__, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(DefaultFlowCallback)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        # We can also add, pop, or remove by instance
        trainer = self.get_trainer()
        cb = trainer.callback_handler.callbacks[0]
        trainer.remove_callback(cb)
        expected_callbacks.remove(DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer = self.get_trainer()
        cb1 = trainer.callback_handler.callbacks[0]
        cb2 = trainer.pop_callback(cb1)
        self.assertEqual(cb1, cb2)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

        trainer.add_callback(cb1)
        expected_callbacks.insert(0, DefaultFlowCallback)
        self.check_callbacks_equality(trainer.callback_handler.callbacks, expected_callbacks)

    def test_event_flow(self):
        import warnings

        # XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
        warnings.simplefilter(action='ignore', category=UserWarning)

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback])
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # Independent log/save/eval
        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], logging_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], save_steps=5)
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], eval_steps=5, evaluation_strategy='steps')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        trainer = self.get_trainer(callbacks=[MyTestTrainerCallback], evaluation_strategy='epoch')
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # A bit of everything
        trainer = self.get_trainer(
            callbacks=[MyTestTrainerCallback], logging_steps=3, save_steps=10, eval_steps=5, evaluation_strategy='steps',
        )
        trainer.train()
        events = trainer.callback_handler.callbacks[-2].events
        self.assertEqual(events, self.get_expected_events(trainer))

        # warning should be emitted for duplicated callbacks
        with patch('transformers.trainer_callback.logger.warning') as warn_mock:
            trainer = self.get_trainer(
                callbacks=[MyTestTrainerCallback, MyTestTrainerCallback],
            )
            assert str(MyTestTrainerCallback) in warn_mock.call_args[0][0]
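# A minimal sketch (not part of the test file) showing how the Trainer drives
# a callback: it simply calls the matching on_* hook with (args, state,
# control). Here one hook is invoked by hand with None placeholders, which the
# recording callback above never inspects.
cb = MyTestTrainerCallback()
cb.on_init_end(None, None, None)
cb.on_train_begin(None, None, None)
assert cb.events == ['on_init_end', 'on_train_begin']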
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)


if __name__ == "__main__":
    fire.Fire(save_len_file)
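# A minimal sketch (not part of the script above) of reading the pickled
# lengths back, assuming pickle_save wraps pickle.dump; the file path is an
# illustrative assumption.
import pickle

with open('train.len', 'rb') as f:   # hypothetical len_file path
    lengths = pickle.load(f)
# One integer per example, usable e.g. to sort or bucket examples by length.
print(len(lengths), max(lengths))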
import json
from typing import List, Optional, Tuple

from tokenizers import normalizers

from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt'
        ),
    },
    'tokenizer_file': {
        'yjernite/retribert-base-uncased': (
            'https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json'
        ),
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'yjernite/retribert-base-uncased': 512,
}

PRETRAINED_INIT_CONFIGURATION = {
    'yjernite/retribert-base-uncased': {'do_lower_case': True},
}


class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token='[UNK]',
        sep_token='[SEP]',
        pad_token='[PAD]',
        cls_token='[CLS]',
        mask_token='[MASK]',
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get('lowercase', do_lower_case) != do_lower_case
            or normalizer_state.get('strip_accents', strip_accents) != strip_accents
            or normalizer_state.get('handle_chinese_chars', tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop('type'))
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]

        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]

        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
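# A minimal sketch (not part of the module) of the token-type-id layout the
# methods above produce; the id values are illustrative, not real vocab ids.
tok_ids_0 = [7, 8]        # hypothetical first sequence
tok_ids_1 = [9]           # hypothetical second sequence
# Pair layout: [CLS] seq0 [SEP] seq1 [SEP] -> 0s for the first segment, 1s for the second
segment_ids = (len(tok_ids_0) + 2) * [0] + (len(tok_ids_1) + 1) * [1]
assert segment_ids == [0, 0, 0, 0, 1, 1]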
'''simple docstring'''
from typing import Any


class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self) -> str:
        return f"""Node({self.data})"""


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self) -> Any:
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __repr__(self) -> str:
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int) -> Any:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any) -> None:
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self) -> Any:
        return self.delete_nth(0)

    def delete_tail(self) -> Any:  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0) -> Any:
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head

        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main() -> None:
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
import os
import random
import sys

from . import cryptomath_module as cryptomath
from . import rabin_miller

min_primitive_root = 3


def primitive_root(p_val: int) -> int:
    print("Generating primitive root of p")
    while True:
        g = random.randrange(3, p_val)
        if pow(g, 2, p_val) == 1:
            continue
        if pow(g, p_val, p_val) == 1:
            continue
        return g


def generate_key(key_size: int) -> tuple[tuple[int, int, int, int], tuple[int, int]]:
    print("Generating prime p...")
    p = rabin_miller.generate_large_prime(key_size)  # select large prime number.
    e_1 = primitive_root(p)  # one primitive root on modulo p.
    d = random.randrange(3, p)  # private_key -> have to be greater than 2 for safety.
    e_2 = cryptomath.find_mod_inverse(pow(e_1, d, p), p)

    public_key = (key_size, e_1, e_2, p)
    private_key = (key_size, d)
    return public_key, private_key


def make_key_files(name: str, key_size: int) -> None:
    if os.path.exists(f'{name}_pubkey.txt') or os.path.exists(f'{name}_privkey.txt'):
        print("\nWARNING:")
        print(
            f'"{name}_pubkey.txt" or "{name}_privkey.txt" already exists. \n'
            "Use a different name or delete these files and re-run this program."
        )
        sys.exit()

    public_key, private_key = generate_key(key_size)
    print(f'\nWriting public key to file {name}_pubkey.txt...')
    with open(f'{name}_pubkey.txt', "w") as fo:
        fo.write(f'{public_key[0]},{public_key[1]},{public_key[2]},{public_key[3]}')
    print(f'Writing private key to file {name}_privkey.txt...')
    with open(f'{name}_privkey.txt', "w") as fo:
        fo.write(f'{private_key[0]},{private_key[1]}')


def main() -> None:
    print("Making key files...")
    make_key_files("elgamal", 2048)
    print("Key files generation successful")


if __name__ == "__main__":
    main()
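# A minimal sketch (not part of the key generator above) of how ElGamal
# encryption uses the components produced by generate_key: since e_2 is the
# modular inverse of g^d, decryption reduces to m = c2 * c1^d mod p.
# The toy numbers below are illustrative and far too small to be secure.
import random

p, g = 467, 2                       # tiny demo prime and generator (insecure!)
d = random.randrange(3, p)          # private exponent
e_2 = pow(pow(g, d, p), -1, p)      # inverse of g^d, as cryptomath.find_mod_inverse computes

m = 123                             # message, must satisfy 0 <= m < p
k = random.randrange(3, p)          # per-message ephemeral key
c1, c2 = pow(g, k, p), (m * pow(e_2, k, p)) % p   # ciphertext pair

# Because e_2 = (g^d)^(-1): c2 * c1^d = m * g^(-dk) * g^(kd) = m (mod p).
recovered = (c2 * pow(c1, d, p)) % p
assert recovered == m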
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union

import transformers
from transformers.testing_utils import require_tf, require_torch, slow


logger = logging.getLogger()


@unittest.skip('Temporarily disable the doc tests.')
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        ignore_files: Union[List[str], None] = None,
        n_identifier: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str('..' / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(transformers_directory, identifier=files, ignore_files=ignore_files)

    def test_tokenization_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'tokenization'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_configuration_examples(self):
        transformers_directory = Path('src/transformers')
        files = 'configuration'
        self.analyze_directory(transformers_directory, identifier=files)

    def test_remaining_examples(self):
        transformers_directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(transformers_directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        doc_source_directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(doc_source_directory, ignore_files=ignore_files, only_modules=False)
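# A minimal, self-contained sketch (not from the test class above) of the
# doctest APIs it relies on: DocTestSuite collects a module's docstring
# examples and a TextTestRunner executes them. The demo module is synthetic.
import doctest
import types
import unittest

mod = types.ModuleType('demo')
mod.__test__ = {'arith': '>>> 1 + 2\n3\n'}   # doctest also scans this mapping
suite = doctest.DocTestSuite(mod)
result = unittest.TextTestRunner(verbosity=0).run(suite)
assert len(result.failures) == 0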
from datetime import datetime as dt
import os

from github import Github


LABELS_TO_EXEMPT = [
    "good first issue",
    "good second issue",
    "good difficult issue",
    "feature request",
    "new model",
    "wip",
]


def main():
    g = Github(os.environ["GITHUB_TOKEN"])
    repo = g.get_repo("huggingface/transformers")
    open_issues = repo.get_issues(state="open")

    for issue in open_issues:
        comments = sorted([comment for comment in issue.get_comments()], key=lambda i: i.created_at, reverse=True)
        last_comment = comments[0] if len(comments) > 0 else None
        if (
            last_comment is not None
            and last_comment.user.login == "github-actions[bot]"
            and (dt.utcnow() - issue.updated_at).days > 7
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would close issue {issue.number} since it has been 7 days of inactivity since bot mention.")
            issue.edit(state="closed")
        elif (
            (dt.utcnow() - issue.updated_at).days > 23
            and (dt.utcnow() - issue.created_at).days >= 30
            and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
        ):
            # print(f"Would add stale comment to {issue.number}")
            issue.create_comment(
                "This issue has been automatically marked as stale because it has not had "
                "recent activity. If you think this still needs to be addressed "
                "please comment on this thread.\n\nPlease note that issues that do not follow the "
                "[contributing guidelines](https://github.com/huggingface/transformers/blob/main/CONTRIBUTING.md) "
                "are likely to be ignored."
            )


if __name__ == "__main__":
    main()
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f""".{module_name}""", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.'
        )
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.'
        )

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if 'AutoImageProcessor' in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.'
                )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if 'AutoFeatureExtractor' in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.'
                )

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and 'AutoImageProcessor' in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}"""
        )

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
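# A minimal sketch (not part of the module) of the public entry point defined
# above; the checkpoint name is an illustrative choice of a public ViT model.
from transformers import AutoImageProcessor

processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
# The resolved class depends on the checkpoint's config, e.g. ViTImageProcessor.
print(type(processor).__name__)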
import numpy

# List of input, output pairs
train_data = (
    ((5, 2, 3), 15),
    ((6, 5, 9), 25),
    ((11, 12, 13), 41),
    ((1, 1, 1), 8),
    ((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009


def _error(example_no, data_set='train'):
    """Return the error (hypothesis - actual output) for the given example."""
    return calculate_hypothesis_value(example_no, data_set) - output(example_no, data_set)


def _hypothesis_value(data_input_tuple):
    """Calculate the linear hypothesis value for a given input tuple."""
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == 'train':
        return train_data[example_no][1]
    elif data_set == 'test':
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == 'train':
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == 'test':
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector, temp_parameter_vector, atol=absolute_error_limit, rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(('Number of iterations:', j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(('Actual output value:', output(i, 'test')))
        print(('Hypothesis output:', calculate_hypothesis_value(i, 'test')))


if __name__ == "__main__":
    run_gradient_descent()
    print('\nTesting gradient descent for a linear hypothesis function.\n')
    test_gradient_descent()
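# A minimal vectorized sketch (not part of the script above) of the same
# batch-gradient-descent update using NumPy: a bias column is prepended so a
# single matrix product computes every hypothesis value at once.
import numpy as np

X = np.array([[5, 2, 3], [6, 5, 9], [11, 12, 13], [1, 1, 1], [11, 12, 13]], float)
y = np.array([15, 25, 41, 8, 41], float)
Xb = np.hstack([np.ones((len(X), 1)), X])      # bias column + features
theta = np.array([2.0, 4.0, 1.0, 5.0])         # same initial parameters

for _ in range(10_000):
    grad = Xb.T @ (Xb @ theta - y) / len(y)    # gradient of the mean squared error / 2
    new_theta = theta - 0.009 * grad
    if np.allclose(theta, new_theta, atol=2e-6, rtol=0):
        break
    theta = new_theta
print(theta)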
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
    import PIL.Image

    from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = 'PIL.Image.Image'
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)

    def __call__(self):
        return self.pa_type

    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}."""
            )

    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        from .features import Value

        return (
            self
            if self.decode
            else {
                "bytes": Value('binary'),
                "path": Value('string'),
            }
        )

    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null()
            )
        return array_cast(storage, self.pa_type)

    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)


def list_image_compression_formats() -> list:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS


def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    """Convert a PIL Image object to bytes using native compression if possible, otherwise use PNG/TIFF compression."""
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()


def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}


def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize

    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays."""
            )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            if np.dtype(dtype_str) in _VALID_IMAGE_ARRAY_DTPYES:
                dest_dtype = np.dtype(dtype_str)
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}"""
            )

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}


def objects_to_list_of_image_dicts(
    objs: Union[List[str], List[dict], List[np.ndarray], List["PIL.Image.Image"]]
) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
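# A minimal sketch (not part of the module) of the feature in use: casting a
# column of file paths to Image makes the dataset decode them lazily into PIL
# images on access. The file name is an illustrative assumption.
from datasets import Dataset, Image

ds = Dataset.from_dict({'image': ['path/to/cat.png']})   # hypothetical path
ds = ds.cast_column('image', Image())
# ds[0]['image'] would now be a PIL.Image.Image decoded from that file.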
"""simple docstring"""
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__snake_case : int = get_tests_dir('fixtures/test_sentencepiece.model')
if is_torch_available():
from transformers.models.mam_aaa.modeling_mam_aaa import shift_tokens_right
__snake_case : Dict = 256_047
__snake_case : Optional[int] = 256_145
@require_sentencepiece
@require_tokenizers
class A__ ( _UpperCamelCase , unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE = NllbTokenizer
SCREAMING_SNAKE_CASE = NllbTokenizerFast
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = True
SCREAMING_SNAKE_CASE = {}
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Union[str, Any]:
"""simple docstring"""
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def _SCREAMING_SNAKE_CASE ( self: Any) -> Optional[int]:
"""simple docstring"""
        tokenizer = NllbTokenizer(SAMPLE_VOCAB , keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens , ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(
            tokenizer.convert_tokens_to_ids(tokens) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(
            ids , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] , )
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
def _SCREAMING_SNAKE_CASE ( self: Optional[Any]) -> Optional[Any]:
"""simple docstring"""
        self.tokenizers_list = [(self.rust_tokenizer_class, 'hf-internal-testing/tiny-random-nllb', {})]
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__lowerCAmelCase : List[str] = self.rust_tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__lowerCAmelCase : Tuple = self.tokenizer_class.from_pretrained(_UpperCAmelCase , **_UpperCAmelCase)
__lowerCAmelCase : Tuple = tempfile.mkdtemp()
__lowerCAmelCase : str = tokenizer_r.save_pretrained(_UpperCAmelCase)
__lowerCAmelCase : str = tokenizer_p.save_pretrained(_UpperCAmelCase)
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
__lowerCAmelCase : int = tuple(f for f in tokenizer_r_files if "tokenizer.json" not in f)
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase)
# Checks everything loads correctly in the same way
__lowerCAmelCase : Union[str, Any] = tokenizer_r.from_pretrained(_UpperCAmelCase)
__lowerCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(_UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase))
shutil.rmtree(_UpperCAmelCase)
# Save tokenizer rust, legacy_format=True
__lowerCAmelCase : Any = tempfile.mkdtemp()
__lowerCAmelCase : Tuple = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase)
__lowerCAmelCase : Optional[int] = tokenizer_p.save_pretrained(_UpperCAmelCase)
# Checks it save with the same files
self.assertSequenceEqual(_UpperCAmelCase , _UpperCAmelCase)
# Checks everything loads correctly in the same way
__lowerCAmelCase : Optional[Any] = tokenizer_r.from_pretrained(_UpperCAmelCase)
__lowerCAmelCase : Optional[Any] = tokenizer_p.from_pretrained(_UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase))
shutil.rmtree(_UpperCAmelCase)
# Save tokenizer rust, legacy_format=False
__lowerCAmelCase : Dict = tempfile.mkdtemp()
__lowerCAmelCase : Any = tokenizer_r.save_pretrained(_UpperCAmelCase , legacy_format=_UpperCAmelCase)
__lowerCAmelCase : Union[str, Any] = tokenizer_p.save_pretrained(_UpperCAmelCase)
# Checks it saved the tokenizer.json file
self.assertTrue(any("tokenizer.json" in f for f in tokenizer_r_files))
# Checks everything loads correctly in the same way
__lowerCAmelCase : int = tokenizer_r.from_pretrained(_UpperCAmelCase)
__lowerCAmelCase : List[str] = tokenizer_p.from_pretrained(_UpperCAmelCase)
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(_UpperCAmelCase , _UpperCAmelCase))
shutil.rmtree(_UpperCAmelCase)
@require_torch
def _SCREAMING_SNAKE_CASE ( self: int) -> str:
"""simple docstring"""
if not self.test_seqaseq:
return
__lowerCAmelCase : List[Any] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F"""{tokenizer.__class__.__name__}"""):
# Longer text that will definitely require truncation.
__lowerCAmelCase : Any = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for'
' Syria is that \'there is no military solution\' to the nearly five-year conflict and more weapons'
' will only worsen the violence and misery for millions of people.',
]
__lowerCAmelCase : Optional[Any] = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al'
' Rusiei pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi'
' că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
try:
__lowerCAmelCase : Optional[int] = tokenizer.prepare_seqaseq_batch(
src_texts=_UpperCAmelCase , tgt_texts=_UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="ron_Latn" , )
except NotImplementedError:
return
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 10)
# max_target_length will default to max_length if not specified
__lowerCAmelCase : List[str] = tokenizer.prepare_seqaseq_batch(
_UpperCAmelCase , tgt_texts=_UpperCAmelCase , max_length=3 , return_tensors="pt")
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.labels.shape[1] , 3)
__lowerCAmelCase : Dict = tokenizer.prepare_seqaseq_batch(
src_texts=_UpperCAmelCase , max_length=3 , max_target_length=10 , return_tensors="pt")
self.assertEqual(batch_encoder_only.input_ids.shape[1] , 3)
self.assertEqual(batch_encoder_only.attention_mask.shape[1] , 3)
self.assertNotIn("decoder_input_ids" , _UpperCAmelCase)
@unittest.skip("Unfortunately way too slow to build a BPE with SentencePiece.")
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> str:
"""simple docstring"""
pass
def _SCREAMING_SNAKE_CASE ( self: Dict) -> Tuple:
"""simple docstring"""
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F"""{tokenizer.__class__.__name__} ({pretrained_name})"""):
__lowerCAmelCase : Dict = [AddedToken("<special>" , lstrip=_UpperCAmelCase)]
__lowerCAmelCase : Dict = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase)
__lowerCAmelCase : Tuple = tokenizer_r.encode("Hey this is a <special> token")
__lowerCAmelCase : Dict = tokenizer_r.encode("<special>" , add_special_tokens=_UpperCAmelCase)[0]
self.assertTrue(special_token_id in r_output)
if self.test_slow_tokenizer:
__lowerCAmelCase : Tuple = self.rust_tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase , )
__lowerCAmelCase : Any = self.tokenizer_class.from_pretrained(
_UpperCAmelCase , additional_special_tokens=_UpperCAmelCase , **_UpperCAmelCase)
__lowerCAmelCase : Tuple = tokenizer_p.encode("Hey this is a <special> token")
__lowerCAmelCase : Optional[Any] = tokenizer_cr.encode("Hey this is a <special> token")
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertTrue(special_token_id in p_output)
self.assertTrue(special_token_id in cr_output)
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
'''simple docstring'''
    checkpoint_name = 'facebook/nllb-200-distilled-600M'
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
2_5_6_0_4_7,
1_6_2_9_7,
1_3_4_4_0_8,
8_1_6_5,
2_4_8_0_6_6,
1_4_7_3_4,
9_5_0,
1_1_3_5,
1_0_5_7_2_1,
3_5_7_3,
8_3,
2_7_3_5_2,
1_0_8,
4_9_4_8_6,
2,
]
@classmethod
def _SCREAMING_SNAKE_CASE ( cls: Union[str, Any]) -> Dict:
"""simple docstring"""
        cls.tokenizer : NllbTokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name , src_lang="eng_Latn" , tgt_lang="ron_Latn")
        cls.pad_token_id = 1
return cls
def _SCREAMING_SNAKE_CASE ( self: Any) -> str:
"""simple docstring"""
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Arab"] , 25_6001)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["ace_Latn"] , 25_6002)
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["fra_Latn"] , 25_6057)
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer.batch_encode_plus(self.src_text).input_ids[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Any) -> int:
"""simple docstring"""
self.assertIn(_UpperCAmelCase , self.tokenizer.all_special_ids)
# fmt: off
__lowerCAmelCase : Dict = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__lowerCAmelCase : Tuple = self.tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase)
__lowerCAmelCase : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=_UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , _UpperCAmelCase)
self.assertNotIn(self.tokenizer.eos_token , _UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : Optional[int] = ['this is gunna be a long sentence ' * 20]
assert isinstance(src_text[0] , _UpperCAmelCase)
__lowerCAmelCase : List[str] = 10
__lowerCAmelCase : List[Any] = self.tokenizer(_UpperCAmelCase , max_length=_UpperCAmelCase , truncation=_UpperCAmelCase).input_ids[0]
self.assertEqual(ids[-1] , 2)
self.assertEqual(ids[0] , _UpperCAmelCase)
self.assertEqual(len(_UpperCAmelCase) , _UpperCAmelCase)
def _SCREAMING_SNAKE_CASE ( self: Dict) -> str:
"""simple docstring"""
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "ar_AR"]) , [25_6203, 3])
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCAmelCase : Tuple = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(_UpperCAmelCase)
__lowerCAmelCase : Tuple = NllbTokenizer.from_pretrained(_UpperCAmelCase)
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , _UpperCAmelCase)
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> str:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=len(self.expected_src_tokens) , return_tensors="pt" , )
__lowerCAmelCase : Tuple = shift_tokens_right(
batch["labels"] , self.tokenizer.pad_token_id , self.tokenizer.lang_code_to_id["ron_Latn"])
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase)
self.assertEqual((2, 15) , batch.input_ids.shape)
self.assertEqual((2, 15) , batch.attention_mask.shape)
__lowerCAmelCase : Optional[int] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , _UpperCAmelCase)
self.assertEqual(_UpperCAmelCase , batch.decoder_input_ids[0, 0]) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [EN_CODE])
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id])
def _SCREAMING_SNAKE_CASE ( self: List[str]) -> Dict:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer(self.src_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=3 , return_tensors="pt")
__lowerCAmelCase : str = self.tokenizer(
text_target=self.tgt_text , padding=_UpperCAmelCase , truncation=_UpperCAmelCase , max_length=10 , return_tensors="pt")
__lowerCAmelCase : Union[str, Any] = targets['input_ids']
__lowerCAmelCase : Dict = shift_tokens_right(
_UpperCAmelCase , self.tokenizer.pad_token_id , decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] , )
self.assertEqual(batch.input_ids.shape[1] , 3)
self.assertEqual(batch.decoder_input_ids.shape[1] , 10)
@require_torch
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Tuple:
"""simple docstring"""
__lowerCAmelCase : int = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="eng_Latn" , tgt_lang="fra_Latn")
self.assertEqual(
nested_simplify(_UpperCAmelCase) , {
# A, test, EOS, en_XX
"input_ids": [[25_6047, 70, 7356, 2]],
"attention_mask": [[1, 1, 1, 1]],
# ar_AR
"forced_bos_token_id": 25_6057,
} , )
@require_torch
def _SCREAMING_SNAKE_CASE ( self: Union[str, Any]) -> Any:
"""simple docstring"""
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : str = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn")
self.assertEqual(
inputs.input_ids , [1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047])
__lowerCAmelCase : Tuple = False
__lowerCAmelCase : str = self.tokenizer(
"UN Chief says there is no military solution in Syria" , src_lang="eng_Latn" , tgt_lang="fra_Latn")
self.assertEqual(
            inputs.input_ids , [25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2])
| 269 |
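The `shift_tokens_right` imported above (from the M2M100-style modeling file) builds decoder inputs by prepending the language code; a sketch of that behavior, hedged in case the real implementation differs in detail:

import torch

def shift_tokens_right_sketch(input_ids, pad_token_id, decoder_start_token_id):
    # Shift labels one position to the right and put the start token first.
    shifted = input_ids.new_zeros(input_ids.shape)
    shifted[:, 1:] = input_ids[:, :-1].clone()
    shifted[:, 0] = decoder_start_token_id
    # Replace ignore-index entries (-100) with the pad token.
    shifted.masked_fill_(shifted == -100, pad_token_id)
    return shifted

labels = torch.tensor([[250, 9, 2]])
print(shift_tokens_right_sketch(labels, pad_token_id=1, decoder_start_token_id=256145))
# tensor([[256145,    250,      9]])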
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string( string_a , string_b ) -> str | Literal[False]:
    lista = list(string_a )
    listb = list(string_b )
    count = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(lista )
def check( binary ) -> list[str]:
    pi = []
    while True:
        checka = ['$'] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 , len(binary ) ):
                k = compare_string(binary[i] , binary[j] )
                if k is False:
                    checka[i] = '*'
                    checka[j] = '*'
                    temp.append('X' )
        for i in range(len(binary ) ):
            if checka[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary( no_of_variable , minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table( string_a , string_b , count ) -> bool:
    lista = list(string_a )
    listb = list(string_b )
    count_n = 0
    for i in range(len(lista ) ):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection( chart , prime_implicants ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart( prime_implicants , binary ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count('_' )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] , binary[j] , count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n' ) )
    minterms = [
        float(x )
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
    ]
    binary = decimal_to_binary(no_of_variable , minterms )
    prime_implicants = check(binary )
    print('Prime Implicants are:' )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants , binary )
    essential_prime_implicants = selection(chart , prime_implicants )
    print('Essential Prime Implicants are:' )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 0 |
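A non-interactive walk-through of the pipeline above, using integer minterms (note that main() reads floats, which produces strings like '1.00.01.0' rather than '101'):

binary = decimal_to_binary(3, [0, 1, 2, 5])
print(binary)  # ['000', '001', '010', '101']
prime_implicants = check(binary)
chart = prime_implicant_chart(prime_implicants, binary)
print(selection(chart, prime_implicants))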
import argparse
import torch
from transformers import GPTaLMHeadModel, RobertaForMaskedLM
if __name__ == "__main__":
    parser = argparse.ArgumentParser(
description=(
"""Extraction some layers of the full RobertaForMaskedLM or GPT2LMHeadModel for Transfer Learned"""
""" Distillation"""
)
)
parser.add_argument("""--model_type""", default="""roberta""", choices=["""roberta""", """gpt2"""])
parser.add_argument("""--model_name""", default="""roberta-large""", type=str)
parser.add_argument("""--dump_checkpoint""", default="""serialization_dir/tf_roberta_048131723.pth""", type=str)
parser.add_argument("""--vocab_transform""", action="""store_true""")
    args = parser.parse_args()
if args.model_type == "roberta":
        model = RobertaForMaskedLM.from_pretrained(args.model_name)
        prefix = """roberta"""
    elif args.model_type == "gpt2":
        model = GPTaLMHeadModel.from_pretrained(args.model_name)
        prefix = """transformer"""
    state_dict = model.state_dict()
    compressed_sd = {}
# Embeddings #
if args.model_type == "gpt2":
for param_name in ["wte.weight", "wpe.weight"]:
__lowerCamelCase : List[Any] = state_dict[f"""{prefix}.{param_name}"""]
else:
for w in ["word_embeddings", "position_embeddings", "token_type_embeddings"]:
__lowerCamelCase : Tuple = f"""{prefix}.embeddings.{w}.weight"""
__lowerCamelCase : Optional[Any] = state_dict[param_name]
for w in ["weight", "bias"]:
__lowerCamelCase : Union[str, Any] = f"""{prefix}.embeddings.LayerNorm.{w}"""
__lowerCamelCase : Tuple = state_dict[param_name]
# Transformer Blocks #
    std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
if args.model_type == "gpt2":
for layer in ["ln_1", "attn.c_attn", "attn.c_proj", "ln_2", "mlp.c_fc", "mlp.c_proj"]:
for w in ["weight", "bias"]:
__lowerCamelCase : Union[str, Any] = state_dict[
f"""{prefix}.h.{teacher_idx}.{layer}.{w}"""
]
__lowerCamelCase : Optional[Any] = state_dict[f"""{prefix}.h.{teacher_idx}.attn.bias"""]
else:
for layer in [
"attention.self.query",
"attention.self.key",
"attention.self.value",
"attention.output.dense",
"attention.output.LayerNorm",
"intermediate.dense",
"output.dense",
"output.LayerNorm",
]:
for w in ["weight", "bias"]:
__lowerCamelCase : str = state_dict[
f"""{prefix}.encoder.layer.{teacher_idx}.{layer}.{w}"""
]
std_idx += 1
# Language Modeling Head ###s
if args.model_type == "roberta":
for layer in ["lm_head.decoder.weight", "lm_head.bias"]:
__lowerCamelCase : Optional[Any] = state_dict[f"""{layer}"""]
if args.vocab_transform:
for w in ["weight", "bias"]:
__lowerCamelCase : Any = state_dict[f"""lm_head.dense.{w}"""]
__lowerCamelCase : Union[str, Any] = state_dict[f"""lm_head.layer_norm.{w}"""]
elif args.model_type == "gpt2":
for w in ["weight", "bias"]:
__lowerCamelCase : Optional[Any] = state_dict[f"""{prefix}.ln_f.{w}"""]
__lowerCamelCase : Dict = state_dict["""lm_head.weight"""]
print(f"""N layers selected for distillation: {std_idx}""")
print(f"""Number of params transferred for distillation: {len(compressed_sd.keys())}""")
print(f"""Save transferred checkpoint to {args.dump_checkpoint}.""")
torch.save(compressed_sd, args.dump_checkpoint)
| 52 |
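The remapping idea above, isolated as a toy (key names are illustrative, not the real checkpoint layout): teacher layers [0, 2, 4, 7, 9, 11] become student layers 0..5.

teacher_sd = {f"roberta.encoder.layer.{i}.output.dense.weight": i for i in range(12)}
student_sd = {}
std_idx = 0
for teacher_idx in [0, 2, 4, 7, 9, 11]:
    teacher_key = f"roberta.encoder.layer.{teacher_idx}.output.dense.weight"
    student_key = f"roberta.encoder.layer.{std_idx}.output.dense.weight"
    student_sd[student_key] = teacher_sd[teacher_key]
    std_idx += 1
print(student_sd)  # student layer j holds teacher layer [0, 2, 4, 7, 9, 11][j]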
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
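A minimal sketch of the lazy-import pattern behind `_LazyModule` (simplified; the real transformers class also handles submodule attributes and module specs):

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._object_to_module = {
            obj: mod for mod, objs in import_structure.items() for obj in objs
        }

    def __getattr__(self, attr):
        # Import the submodule only when one of its names is first accessed.
        if attr not in self._object_to_module:
            raise AttributeError(attr)
        module = importlib.import_module("." + self._object_to_module[attr], self.__name__)
        return getattr(module, attr)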
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __snake_case ( PipelineTesterMixin , unittest.TestCase ):
    pipeline_class = KandinskyVaaPipeline
    params = [
'image_embeds',
'negative_image_embeds',
]
    batch_params = ['image_embeds', 'negative_image_embeds']
    required_optional_params = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
    test_xformers_attention = False
@property
def __a ( self : Tuple ):
"""simple docstring"""
return 32
@property
def __a ( self : int ):
"""simple docstring"""
return 32
@property
def __a ( self : int ):
"""simple docstring"""
return self.time_input_dim
@property
def __a ( self : List[Any] ):
"""simple docstring"""
return self.time_input_dim * 4
@property
def __a ( self : Union[str, Any] ):
"""simple docstring"""
return 1_00
@property
def __a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = {
'in_channels': 4,
# Out channels is double in channels because predicts mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(**_UpperCAmelCase )
return model
@property
def __a ( self : Optional[Any] ):
"""simple docstring"""
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __a ( self : Dict ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = VQModel(**self.dummy_movq_kwargs )
return model
def __a ( self : Optional[Any] ):
"""simple docstring"""
        unet = self.dummy_unet
        movq = self.dummy_movq
        scheduler = DDIMScheduler(
            num_train_timesteps=10_00 , beta_schedule="""linear""" , beta_start=0.0_00_85 , beta_end=0.0_12 , clip_sample=False , set_alpha_to_one=False , steps_offset=1 , prediction_type="""epsilon""" , thresholding=False , )
        components = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __a ( self : List[str] , _lowercase : str , _lowercase : Dict=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_UpperCAmelCase )
if str(_UpperCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __a ( self : List[str] ):
"""simple docstring"""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
[0.6_23_79_76, 1.0, 0.36_44_13_32, 1.0, 0.70_63_96_34, 0.29_87_71_86, 0.85_65_21_25, 0.5_21_68_43, 0.54_45_40_46] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_slice.flatten()}"""
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f""" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"""
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
    def tearDown( self ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy""" )
SCREAMING_SNAKE_CASE__ = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" , torch_dtype=torch.floataa )
pipe_prior.to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = KandinskyVaaPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-decoder""" , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipeline.to(_UpperCAmelCase )
pipeline.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = 'red cat, 4k photo'
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipe_prior(
_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=5 , negative_prompt="""""" , ).to_tuple()
SCREAMING_SNAKE_CASE__ = torch.Generator(device="""cuda""" ).manual_seed(0 )
SCREAMING_SNAKE_CASE__ = pipeline(
image_embeds=_UpperCAmelCase , negative_image_embeds=_UpperCAmelCase , generator=_UpperCAmelCase , num_inference_steps=1_00 , output_type="""np""" , )
SCREAMING_SNAKE_CASE__ = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(_UpperCAmelCase , _UpperCAmelCase )
| 219 |
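The device-aware seeding used in `get_dummy_inputs` above is a common diffusers-test idiom; isolated, it looks like this ("mps" only supports the global torch seed):

import torch

def make_generator(device, seed=0):
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the default (CPU) generator
    return torch.Generator(device=device).manual_seed(seed)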
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( TokenizerTesterMixin , unittest.TestCase ):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp( self ):
super().setUp()
        vocab_tokens = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_tokenizer( self ,**kwargs ):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_input_output_texts( self ,tokenizer ):
        input_text = 'UNwant\u00E9d,running'
        output_text = 'unwanted, running'
        return input_text, output_text
def __lowercase ( self : Optional[int] ):
        tokenizer = self.tokenizer_class(self.vocab_file )
        tokens = tokenizer.tokenize('UNwant\u00E9d,running' )
        self.assertListEqual(tokens ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
| 89 | 0 |
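The vocab above exercises greedy longest-match WordPiece; a pure-Python sketch of that matching (illustrative, not the transformers implementation):

vocab = {"[UNK]", "un", "##want", "##ed", "runn", "##ing", "wa"}

def wordpiece(word):
    pieces, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:
                pieces.append(piece)
                break
            end -= 1
        else:
            return ["[UNK]"]  # no matching piece at this position
        start = end
    return pieces

print(wordpiece("unwanted"))  # ['un', '##want', '##ed']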
"""simple docstring"""
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class a__ ( ProcessorMixin ):
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'CLIPImageProcessor'
    tokenizer_class = ('XLMRobertaTokenizer', 'XLMRobertaTokenizerFast')
    def __init__( self, image_processor=None, tokenizer=None, **kwargs ):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor, tokenizer )
    def __call__( self, text=None, images=None, return_tensors=None, **kwargs ):
        if text is None and images is None:
            raise ValueError('You have to specify either text or images. Both cannot be none.' )
        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs )
        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs )
        if text is not None and images is not None:
            encoding['pixel_values'] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features ), tensor_type=return_tensors )
    def batch_decode( self, *args, **kwargs ):
        return self.tokenizer.batch_decode(*args, **kwargs )
    def decode( self, *args, **kwargs ):
        return self.tokenizer.decode(*args, **kwargs )
@property
    def model_input_names( self ):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
| 255 |
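A hypothetical usage of the processor class above (checkpoint name is illustrative; any CLIP image processor plus an XLM-R tokenizer would do):

from transformers import CLIPImageProcessor, XLMRobertaTokenizerFast

image_processor = CLIPImageProcessor()
tokenizer = XLMRobertaTokenizerFast.from_pretrained("xlm-roberta-base")
processor = a__(image_processor=image_processor, tokenizer=tokenizer)
inputs = processor(text=["a photo of a cat"], return_tensors="pt")
print(inputs.keys())  # input_ids, attention_mask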
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( PretrainedConfig ):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self ,use_timm_backbone=True ,backbone_config=None ,num_channels=3 ,num_queries=300 ,encoder_layers=6 ,encoder_ffn_dim=2048 ,encoder_attention_heads=8 ,decoder_layers=6 ,decoder_ffn_dim=2048 ,decoder_attention_heads=8 ,encoder_layerdrop=0.0 ,decoder_layerdrop=0.0 ,is_encoder_decoder=True ,activation_function="relu" ,d_model=256 ,dropout=0.1 ,attention_dropout=0.0 ,activation_dropout=0.0 ,init_std=0.02 ,init_xavier_std=1.0 ,auxiliary_loss=False ,position_embedding_type="sine" ,backbone="resnet50" ,use_pretrained_backbone=True ,dilation=False ,class_cost=2 ,bbox_cost=5 ,giou_cost=2 ,mask_loss_coefficient=1 ,dice_loss_coefficient=1 ,cls_loss_coefficient=2 ,bbox_loss_coefficient=5 ,giou_loss_coefficient=2 ,focal_alpha=0.25 ,**kwargs ,):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
            elif isinstance(backbone_config ,dict ):
                backbone_model_type = backbone_config.get('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder ,**kwargs )
@property
def __lowercase ( self : Dict ):
return self.encoder_attention_heads
@property
def __lowercase ( self : str ):
return self.d_model
def __lowercase ( self : int ):
        output = copy.deepcopy(self.__dict__ )
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class __magic_name__ ( OnnxConfig ):
    torch_onnx_minimum_version = version.parse('1.11' )
@property
def __lowercase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Any ):
return 1E-5
@property
def __lowercase ( self : List[Any] ):
return 12
| 89 | 0 |
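A sketch of what the `attribute_map` above buys you (simplified from PretrainedConfig: reads of a mapped name are redirected to the canonical attribute):

class MappedConfig:
    attribute_map = {"hidden_size": "d_model"}

    def __init__(self):
        self.d_model = 256

    def __getattr__(self, name):
        # Only called when normal lookup fails, so no recursion on d_model.
        if name in type(self).attribute_map:
            return getattr(self, type(self).attribute_map[name])
        raise AttributeError(name)

cfg = MappedConfig()
print(cfg.hidden_size)  # 256, served from cfg.d_model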
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : Tuple = logging.get_logger(__name__)
a : str = {
"microsoft/table-transformer-detection": (
"https://huggingface.co/microsoft/table-transformer-detection/resolve/main/config.json"
),
}
class UpperCamelCase__ ( PretrainedConfig ):
"""simple docstring"""
    model_type = 'table-transformer'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
    def __init__( self , use_timm_backbone=True , backbone_config=None , num_channels=3 , num_queries=1_0_0 , encoder_layers=6 , encoder_ffn_dim=2_0_4_8 , encoder_attention_heads=8 , decoder_layers=6 , decoder_ffn_dim=2_0_4_8 , decoder_attention_heads=8 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , is_encoder_decoder=True , activation_function="relu" , d_model=2_5_6 , dropout=0.1 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , init_xavier_std=1.0 , auxiliary_loss=False , position_embedding_type="sine" , backbone="resnet50" , use_pretrained_backbone=True , dilation=False , class_cost=1 , bbox_cost=5 , giou_cost=2 , mask_loss_coefficient=1 , dice_loss_coefficient=1 , bbox_loss_coefficient=5 , giou_loss_coefficient=2 , eos_coefficient=0.1 , **kwargs , ):
        '''simple docstring'''
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can\'t specify both `backbone_config` and `use_timm_backbone`." )
        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone." )
                backbone_config = CONFIG_MAPPING['resnet'](out_features=["stage4"] )
            elif isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.get("model_type" )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
            # set timm attributes to None
            dilation , backbone , use_pretrained_backbone = None, None, None
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )
@property
def A_ ( self ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def A_ ( self ):
'''simple docstring'''
return self.d_model
class UpperCamelCase__ ( OnnxConfig ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = version.parse("1.11" )
@property
def A_ ( self ):
'''simple docstring'''
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
("pixel_mask", {0: "batch"}),
] )
@property
def A_ ( self ):
'''simple docstring'''
return 1e-5
@property
def A_ ( self ):
'''simple docstring'''
return 1_2
| 311 |
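The ordered mapping returned by the ONNX config above is consumed as export-time dynamic axes, roughly like this (illustrative; the real exporter also wires in dummy inputs and opset handling):

dynamic_axes = {
    "pixel_values": {0: "batch", 1: "num_channels", 2: "height", 3: "width"},
    "pixel_mask": {0: "batch"},
}
# torch.onnx.export(model, (pixel_values, pixel_mask), "model.onnx",
#                   input_names=list(dynamic_axes), dynamic_axes=dynamic_axes)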
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
    def __init__( self ,parent ,batch_size=13 ,image_size=32 ,num_channels=3 ,num_stages=4 ,hidden_sizes=[10, 20, 30, 40] ,depths=[2, 2, 3, 2] ,is_training=True ,use_labels=True ,intermediate_size=37 ,hidden_act="gelu" ,num_labels=10 ,initializer_range=0.02 ,out_features=["stage2", "stage3", "stage4"] ,out_indices=[2, 3, 4] ,scope=None ,):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] ,self.num_labels )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return ConvNextVaConfig(
            num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=False ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
    def create_and_check_model( self ,config ,pixel_values ,labels ):
        model = ConvNextVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
    def create_and_check_for_image_classification( self ,config ,pixel_values ,labels ):
        model = ConvNextVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values ,labels=labels )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
    def create_and_check_backbone( self ,config ,pixel_values ,labels ):
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextVaBackbone(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
    def prepare_config_and_inputs_with_labels( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values, 'labels': labels}
        return config, inputs_dict
@require_torch
class __magic_name__ ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = ConvNextVaModelTester(self )
        self.config_tester = ConfigTester(self ,config_class=ConvNextVaConfig ,has_text_modality=False ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True
if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES ),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES ),
]:
continue
            model = model_class(config )
            model.to(torch_device )
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
            config , inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True
if (
model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES ), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES )]
or not model_class.supports_gradient_checkpointing
):
continue
            model = model_class(config )
            model.to(torch_device )
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict ,model_class ,return_labels=True )
            loss = model(**inputs ).loss
            loss.backward()
def __lowercase ( self : List[Any] ):
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] ,expected_arg_names )
def __lowercase ( self : int ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
def __lowercase ( self : Any ):
        def check_hidden_states_output(inputs_dict ,config ,model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict ,model_class ) )
            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
        config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict ,config ,model_class )
def __lowercase ( self : List[Any] ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def __lowerCamelCase ( ) -> List[Any]:
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
        model = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(torch_device )
        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image ,return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape ,expected_shape )
        expected_slice = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] ,expected_slice ,atol=1E-4 ) )
| 89 | 0 |
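The `floats_tensor` / `ids_tensor` helpers used by the tester above live in test_modeling_common; behaviorally they amount to the following (a simplification — the real helpers thread an explicit RNG):

import torch

def floats_tensor(shape, scale=1.0):
    return torch.rand(tuple(shape)) * scale

def ids_tensor(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape))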
'''simple docstring'''
def cocktail_shaker_sort( unsorted ):
    '''simple docstring'''
    for i in range(len(unsorted ) - 1, 0, -1 ):
        swapped = False
        for j in range(i, 0, -1 ):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j], unsorted[j - 1] = unsorted[j - 1], unsorted[j]
                swapped = True
        for j in range(i ):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j], unsorted[j + 1] = unsorted[j + 1], unsorted[j]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
a_ = input('Enter numbers separated by a comma:\n').strip()
a_ = [int(item) for item in user_input.split(',')]
    print(F'''{cocktail_shaker_sort(unsorted) = }''')
| 152 |
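A quick sanity check for the fixed sort above:

assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, 2, 11]) == [-4, 0, 1, 2, 5, 11]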
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
lowerCAmelCase__ : Any = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results( test_results ):
    expressions = test_results.split(' ' )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if '=' in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
def extract_first_line_failure( failures_short_lines ):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split('\n' ):
        if re.search(r'_ \[doctest\]' , line ):
            in_error = True
            file = line.split(' ' )[2]
        elif in_error and not line.split(' ' )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict) -> None:
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results['time_spent'].split(',')[0]
        self.n_success = doc_test_results['success']
        self.n_failures = doc_test_results['failures']
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
    @property
    def time(self) -> str:
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(':')
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": F"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def failures(self) -> Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
F"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
F""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": F"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
},
}
@property
    def category_failures(self) -> Dict:
        '''simple docstring'''
        line_length = 40
        category_failures = {k: v['failed'] for k, v in doc_test_results.items() if isinstance(v, dict)}
        report = ''
for category, failures in category_failures.items():
if len(_UpperCAmelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += F"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(_UpperCAmelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": F"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload(self) -> str:
        '''simple docstring'''
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
    @staticmethod
    def error_out() -> None:
        '''simple docstring'''
        payload = [
            {
                'type': 'section',
                'text': {
                    'type': 'plain_text',
                    'text': 'There was an issue running the tests.',
                },
                'accessory': {
                    'type': 'button',
                    'text': {'type': 'plain_text', 'text': 'Check Action results', 'emoji': True},
                    'url': f"""https://github.com/huggingface/transformers/actions/runs/{os.environ['GITHUB_RUN_ID']}""",
                },
            }
        ]
        print('Sending the following payload')
        print(json.dumps({'blocks': payload}))
        client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text='There was an issue running the tests.', blocks=payload, )
    def post(self) -> None:
        '''simple docstring'''
        print('Sending the following payload')
        print(json.dumps({'blocks': json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else 'All tests passed.'
        self.thread_ts = client.chat_postMessage(
            channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], blocks=self.payload, text=text, )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        '''simple docstring'''
        failures_text = ''
        for key, value in failures.items():
            value = value[:200] + ' [Truncated]' if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {'type': 'section', 'text': {'type': 'mrkdwn', 'text': text}}
        if job_link is not None:
            content['accessory'] = {
'type': 'button',
'text': {'type': 'plain_text', 'text': 'GitHub Action job', 'emoji': True},
'url': job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self) -> None:
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError('Can only post reply if a post has been made.')
        job_link = self.doc_test_results.pop('job_link')
        self.doc_test_results.pop('failures')
        self.doc_test_results.pop('success')
        self.doc_test_results.pop('time_spent')
        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result['failures']):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result['failures']
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)
                print('Sending the following reply')
                print(json.dumps({'blocks': blocks}))
                client.chat_postMessage(
                    channel=os.environ['CI_SLACK_CHANNEL_ID_DAILY'], text=f"Results for {job}", blocks=blocks, thread_ts=self.thread_ts['ts'], )
time.sleep(1 )
def get_job_links():
    run_id = os.environ['GITHUB_RUN_ID']
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job['name']: job['html_url'] for job in result['jobs']})
        return jobs
    except Exception as e:
        print('Unknown error, could not fetch links.', e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding='utf-8') as f:
                    _artifact[file.split('.')[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"""Could not open {os.path.join(name, file)}.""") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str) -> None:
            '''simple docstring'''
            self.name = name
            self.paths = []
        def __str__(self) -> str:
            '''simple docstring'''
            return self.name
        def add_path(self, path: str) -> None:
            '''simple docstring'''
            self.paths.append({'name': self.name, 'path': path})
    _available_artifacts: Dict[str, Artifact] = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
lowerCAmelCase__ : List[str] = get_job_links()
lowerCAmelCase__ : Union[str, Any] = retrieve_available_artifacts()
lowerCAmelCase__ : List[str] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
lowerCAmelCase__ : Optional[Any] = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
lowerCAmelCase__ : Union[str, Any] = github_actions_job_links.get('''run_doctests''')
lowerCAmelCase__ : Any = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
lowerCAmelCase__ : Tuple = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
lowerCAmelCase__, lowerCAmelCase__, lowerCAmelCase__ : Union[str, Any] = handle_test_results(artifact['''stats'''])
lowerCAmelCase__ : Dict = failed
lowerCAmelCase__ : Union[str, Any] = success
lowerCAmelCase__ : Optional[int] = time_spent[1:-1] + ''', '''
lowerCAmelCase__ : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
lowerCAmelCase__ : Optional[int] = line.replace('''FAILED ''', '''''')
lowerCAmelCase__ : Dict = line.split()[0].replace('''\n''', '''''')
if "::" in line:
lowerCAmelCase__, lowerCAmelCase__ : List[str] = line.split('''::''')
else:
lowerCAmelCase__, lowerCAmelCase__ : Dict = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
lowerCAmelCase__ : Optional[int] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
lowerCAmelCase__ : Union[str, Any] = all_failures[test] if test in all_failures else '''N/A'''
lowerCAmelCase__ : Dict = failure
break
lowerCAmelCase__ : Tuple = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
| 143 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)
def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 0 |
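For reference on the check above: this appears to be Project Euler Problem 207, where a partition 4**t = 2**t + k is "perfect" when t is an integer. Solving the quadratic in x = 2**t gives x = (sqrt(4k + 1) + 1) / 2, so the test reduces to whether log2(sqrt(4k + 1) / 2 + 1 / 2) is an integer. For example, k = 2 gives x = 2 and t = 1 (perfect), while k = 6 gives x = 3, whose log2 is not an integer.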
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2ConformerConfig,
    Wav2Vec2ConformerForCTC,
    Wav2Vec2ConformerForPreTraining,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.linear_k''': '''encoder.layers.*.self_attn.linear_k''',
'''self_attn.linear_v''': '''encoder.layers.*.self_attn.linear_v''',
'''self_attn.linear_q''': '''encoder.layers.*.self_attn.linear_q''',
'''self_attn.pos_bias_u''': '''encoder.layers.*.self_attn.pos_bias_u''',
'''self_attn.pos_bias_v''': '''encoder.layers.*.self_attn.pos_bias_v''',
'''self_attn.linear_out''': '''encoder.layers.*.self_attn.linear_out''',
'''self_attn.linear_pos''': '''encoder.layers.*.self_attn.linear_pos''',
'''self_attn.rotary_emb''': '''encoder.embed_positions''',
'''self_attn_layer_norm''': '''encoder.layers.*.self_attn_layer_norm''',
'''conv_module.pointwise_conv1''': '''encoder.layers.*.conv_module.pointwise_conv1''',
'''conv_module.pointwise_conv2''': '''encoder.layers.*.conv_module.pointwise_conv2''',
'''conv_module.depthwise_conv''': '''encoder.layers.*.conv_module.depthwise_conv''',
'''conv_module.batch_norm''': '''encoder.layers.*.conv_module.batch_norm''',
'''conv_module.layer_norm''': '''encoder.layers.*.conv_module.layer_norm''',
'''ffn1.w_1''': '''encoder.layers.*.ffn1.intermediate_dense''',
'''ffn1.w_2''': '''encoder.layers.*.ffn1.output_dense''',
'''ffn1.layer_norm''': '''encoder.layers.*.ffn1_layer_norm''',
'''ffn2.w_1''': '''encoder.layers.*.ffn2.intermediate_dense''',
'''ffn2.w_2''': '''encoder.layers.*.ffn2.output_dense''',
'''ffn2.layer_norm''': '''encoder.layers.*.ffn2_layer_norm''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """simple docstring"""
    for attribute in key.split('.'):
        hf_pointer = getattr(hf_pointer, attribute)
    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    elif weight_type == "inv_freq":
        hf_pointer.inv_freq.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """simple docstring"""
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2_conformer.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == 'group', )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = 'wav2vec2_conformer.' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('w2v_model.')[-1] == name.split('.')[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split('.')[-2]
                        mapped_key = mapped_key.replace('*', layer_index)
                    if "pos_bias_u" in name:
                        weight_type = None
                    elif "pos_bias_v" in name:
                        weight_type = None
                    elif "weight_g" in name:
                        weight_type = 'weight_g'
                    elif "weight_v" in name:
                        weight_type = 'weight_v'
                    elif "bias" in name:
                        weight_type = 'bias'
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = 'weight'
                    elif "running_mean" in name:
                        weight_type = 'running_mean'
                    elif "inv_freq" in name:
                        weight_type = 'inv_freq'
                    elif "running_var" in name:
                        weight_type = 'running_var'
                    elif "num_batches_tracked" in name:
                        weight_type = 'num_batches_tracked'
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)
    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """simple docstring"""
    name = full_name.split('conv_layers.')[-1]
    items = name.split('.')
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wav2vec2_conformer_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """simple docstring"""
    if config_path is not None:
        config = Wav2Vec2ConformerConfig.from_pretrained(config_path, hidden_act='swish')
    else:
        config = Wav2Vec2ConformerConfig()
    if "rope" in checkpoint_path:
        config.position_embeddings_type = 'rotary'
    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, 'vocab.json')
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict['<pad>'] = 0
            vocab_dict['<s>'] = 1
            with open(vocab_path, 'w', encoding='utf-8') as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path, unk_token=target_dict.unk_word, pad_token=target_dict.pad_word, bos_token=target_dict.bos_word, eos_token=target_dict.eos_word, word_delimiter_token='|', do_lower_case=False, )
            return_attention_mask = True if config.feat_extract_norm == 'layer' else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1, sampling_rate=16_000, padding_value=0, do_normalize=True, return_attention_mask=return_attention_mask, )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wav2vec = Wav2Vec2ConformerForCTC(config)
    else:
        hf_wav2vec = Wav2Vec2ConformerForPreTraining(config)
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={'data': '/'.join(dict_path.split('/')[:-1])} )
    else:
        task_arg = argparse.Namespace(task='audio_pretraining')
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)
    model = model[0].eval()
    recursively_load_weights(model, hf_wav2vec, not is_finetuned)
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
    convert_wav2vec2_conformer_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 39 |
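As a usage sketch, the converter above is driven from the command line roughly as follows (the script file name and paths are placeholders, not taken from this file):
# python convert_wav2vec2_conformer_checkpoint.py \
#     --checkpoint_path /path/to/fairseq/checkpoint.pt \
#     --pytorch_dump_folder_path /path/to/output \
#     --not_finetuned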
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config['num_res_blocks'] + 1)
        layer_in_block_id = (i - 1) % (config['num_res_blocks'] + 1)
        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]
        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue
        paths = renew_resnet_paths(resnets)
        meta_path = {'old': f"input_blocks.{i}.0", 'new': f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config )
        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                'old': f"input_blocks.{i}.1",
                'new': f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    'key': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    'query': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    'value': f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split, config=config, )
    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]
    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)
    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)
    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        'middle_block.1.qkv.bias': {
            'key': 'mid_block.attentions.0.key.bias',
            'query': 'mid_block.attentions.0.query.bias',
            'value': 'mid_block.attentions.0.value.bias',
        },
        'middle_block.1.qkv.weight': {
            'key': 'mid_block.attentions.0.key.weight',
            'query': 'mid_block.attentions.0.query.weight',
            'value': 'mid_block.attentions.0.value.weight',
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config )
    for i in range(num_output_blocks):
        block_id = i // (config['num_res_blocks'] + 1)
        layer_in_block_id = i % (config['num_res_blocks'] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}
        for layer in output_block_layers:
            layer_id, layer_name = layer.split('.')[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]
        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]
            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)
            meta_path = {'old': f"output_blocks.{i}.0", 'new': f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config)
            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(['conv.weight', 'conv.bias'])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]
                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []
            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    'old': f"output_blocks.{i}.1",
                    'new': f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        'key': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        'query': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        'value': f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], attention_paths_to_split=to_split if any('qkv' in key for key in attentions) else None, config=config, )
        else:
            resnet_0_paths = renew_resnet_paths(output_block_layers, n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = '.'.join(['output_blocks', str(i), path['old']])
                new_path = '.'.join(['up_blocks', str(block_id), 'resnets', str(layer_in_block_id), path['new']])
                new_checkpoint[new_path] = checkpoint[old_path]
    return new_checkpoint
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
    args = parser.parse_args()
    checkpoint = torch.load(args.checkpoint_path)
    with open(args.config_file) as f:
        config = json.loads(f.read())
    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)
    if "ldm" in config:
        del config["ldm"]
    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)
    try:
        scheduler = DDPMScheduler.from_config('/'.join(args.checkpoint_path.split('/')[:-1]))
        vqvae = VQModel.from_pretrained('/'.join(args.checkpoint_path.split('/')[:-1]))
        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
| 89 | 0 |
import numpy as np
import torch
from ..models.clipseg import CLIPSegForImageSegmentation
from ..utils import is_vision_available, requires_backends
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class ImageSegmentationTool(PipelineTool):
    '''simple docstring'''
    description = (
        'This is a tool that creates a segmentation mask of an image according to a label. It cannot create an image.'
        'It takes two arguments named `image` which should be the original image, and `label` which should be a text '
        'describing the elements what should be identified in the segmentation mask. The tool returns the mask.'
    )
    default_checkpoint = 'CIDAS/clipseg-rd64-refined'
    name = 'image_segmenter'
    model_class = CLIPSegForImageSegmentation
    inputs = ['image', 'text']
    outputs = ['image']
    def __init__(self, *args, **kwargs):
        """simple docstring"""
        requires_backends(self, ["vision"])
        super().__init__(*args, **kwargs)
    def encode(self, image: "Image", label: str):
        """simple docstring"""
        return self.pre_processor(text=[label], images=[image], padding=True, return_tensors="pt")
    def forward(self, inputs):
        """simple docstring"""
        with torch.no_grad():
            logits = self.model(**inputs).logits
        return logits
    def decode(self, outputs):
        """simple docstring"""
        array = outputs.cpu().detach().numpy()
        array[array <= 0] = 0
        array[array > 0] = 1
        return Image.fromarray((array * 255).astype(np.uint8))
| 274 |
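A minimal usage sketch for the tool above (a `PipelineTool` is callable, chaining encode, forward, and decode; the image path and label are placeholders):
# from PIL import Image
# tool = ImageSegmentationTool()
# mask = tool(Image.open("photo.jpg"), "a cat")  # binary PIL mask for the label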
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
        'ffmpeg',
        '-i',
        'pipe:0',
        '-ac',
        ac,
        '-ar',
        ar,
        '-f',
        format_for_conversion,
        '-hide_banner',
        '-loglevel',
        'quiet',
        'pipe:1',
    ]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
_a : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[float] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = "f32le", ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
| 89 | 0 |
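A minimal usage sketch for `ffmpeg_read` above (assumes an `ffmpeg` binary on PATH and a local `sample.wav`, both placeholders):
# with open("sample.wav", "rb") as f:
#     audio = ffmpeg_read(f.read(), sampling_rate=16_000)  # mono float32 at 16 kHz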
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        """simple docstring"""
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]
    def get_sum(self, start: int, end: int) -> int:
        """simple docstring"""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
    def contains_sum(self, target_sum: int) -> bool:
        """simple docstring"""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
| 18 |
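A short usage sketch for the prefix-sum class above (the values are illustrative):
# ps = PrefixSum([1, 2, 3, 4])   # internal prefix sums: [1, 3, 6, 10]
# ps.get_sum(1, 3)               # -> 9, the sum of indices 1..3
# ps.contains_sum(6)             # -> True, since [1, 2, 3] sums to 6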
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 0 |
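One performance note on the two BFS helpers above: `queue.pop(0)` on a Python list is O(n), so for large graphs a `collections.deque` with `popleft()` is the usual substitution (a sketch reusing the names from the functions above):
# from collections import deque
# queue = deque([[start]])
# path = queue.popleft()  # O(1) instead of list.pop(0)'s O(n)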
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)
            module = importlib.import_module(f".{module_name}", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)
    return None
def get_image_processor_config(pretrained_model_name_or_path, cache_dir=None, force_download=False, resume_download=False, proxies=None, use_auth_token=None, revision=None, local_files_only=False, **kwargs, ):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path, IMAGE_PROCESSOR_NAME, cache_dir=cache_dir, force_download=force_download, resume_download=resume_download, proxies=proxies, use_auth_token=use_auth_token, revision=revision, local_files_only=local_files_only, )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.')
        return {}
    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
class AutoImageProcessor:
    """simple docstring"""
    def __init__(self) -> None:
raise EnvironmentError(
'''AutoImageProcessor is designed to be instantiated '''
'''using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.''' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True
        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.')
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.')
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)
        raise ValueError(
            f"Unrecognized image processor in {pretrained_model_name_or_path}. Should have a "
            f"`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}")
    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_swin'] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_swin'] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
os.environ["TRANSFORMERS_NO_ADVISORY_WARNINGS"] = 'true'
def get_basic_setup(accelerator, num_samples=82, batch_size=16):
    set_seed(42)
    model = RegressionModel()
    ddp_model = deepcopy(model)
    dset = RegressionDataset(length=num_samples)
    dataloader = DataLoader(dset, batch_size=batch_size)
    model.to(accelerator.device)
    ddp_model, dataloader = accelerator.prepare(ddp_model, dataloader)
    return model, ddp_model, dataloader
def get_dataloader(accelerator: Accelerator, use_longest=False):
    tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/mrpc-bert-base-cased")
    dataset = load_dataset("glue", "mrpc", split="validation")
    def tokenize_function(examples):
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs
    with accelerator.main_process_first():
        tokenized_datasets = dataset.map(
            tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], )
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")
    def collate_fn(examples):
        if use_longest:
            return tokenizer.pad(examples, padding="longest", return_tensors="pt")
        return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
    return DataLoader(tokenized_datasets, shuffle=False, collate_fn=collate_fn, batch_size=16)
def get_mrpc_setup(dispatch_batches, split_batches):
    accelerator = Accelerator(dispatch_batches=dispatch_batches, split_batches=split_batches)
    dataloader = get_dataloader(accelerator, not dispatch_batches)
    model = AutoModelForSequenceClassification.from_pretrained(
        "hf-internal-testing/mrpc-bert-base-cased", return_dict=True)
    ddp_model, ddp_dataloader = accelerator.prepare(model, dataloader)
    return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def generate_predictions(model, dataloader, accelerator):
    logits_and_targets = []
    for batch in dataloader:
        input, target = batch.values()
        with torch.no_grad():
            logit = model(input)
            logit, target = accelerator.gather_for_metrics((logit, target))
        logits_and_targets.append((logit, target))
    logits, targs = [], []
    for logit, targ in logits_and_targets:
        logits.append(logit)
        targs.append(targ)
    logits, targs = torch.cat(logits), torch.cat(targs)
    return logits, targs
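
# Why `gather_for_metrics` is used above instead of a plain `accelerator.gather`:
# in a distributed run the sampler pads the last batch so every process sees the
# same number of samples, and `gather_for_metrics` drops those duplicated
# samples before metrics are computed. A minimal sketch (names hypothetical):
#
#   preds = accelerator.gather_for_metrics(preds)
#   # len(preds) == len(dataset), not rounded up to a multiple of the world size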
def test_torch_metrics(accelerator, num_samples=82, dispatch_batches=False, split_batches=False, batch_size=16):
    model, ddp_model, dataloader = get_basic_setup(accelerator, num_samples, batch_size)
    logits, _ = generate_predictions(ddp_model, dataloader, accelerator)
    assert (
        len(logits) == num_samples
    ), f"""Unexpected number of inputs:\n    Expected: {num_samples}\n    Actual: {len(logits)}"""
def test_mrpc(dispatch_batches: bool = False, split_batches: bool = False):
    metric = evaluate.load("glue", "mrpc")
    setup, accelerator = get_mrpc_setup(dispatch_batches, split_batches)
    # First do baseline
    model, dataloader, device = setup["no"]
    model.to(device)
    model.eval()
    for batch in dataloader:
        batch.to(device)
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        metric.add_batch(predictions=preds, references=batch["labels"])
    baseline = metric.compute()

    # Then do distributed
    model, dataloader, device = setup["ddp"]
    model.eval()
    for batch in dataloader:
        with torch.inference_mode():
            outputs = model(**batch)
        preds = outputs.logits.argmax(dim=-1)
        references = batch["labels"]
        preds, references = accelerator.gather_for_metrics((preds, references))
        metric.add_batch(predictions=preds, references=references)
    distributed = metric.compute()

    for key in "accuracy f1".split():
        assert math.isclose(
            baseline[key], distributed[key]
        ), f"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def main():
    accelerator = Accelerator(split_batches=False, dispatch_batches=False)
    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_warning()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()
    # These are a bit slower so they should only be ran on the GPU or TPU
    if torch.cuda.is_available() or is_tpu_available():
        if accelerator.is_local_main_process:
            print("**Testing gather_for_metrics**")
        for split_batches in [True, False]:
            for dispatch_batches in [True, False]:
                if accelerator.is_local_main_process:
                    print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""")
                test_mrpc(dispatch_batches, split_batches)
                accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test torch metrics**")
    for split_batches in [True, False]:
        for dispatch_batches in [True, False]:
            accelerator = Accelerator(split_batches=split_batches, dispatch_batches=dispatch_batches)
            if accelerator.is_local_main_process:
                print(f"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""")
            test_torch_metrics(accelerator, 99)
            accelerator.state._reset_state()
    if accelerator.is_local_main_process:
        print("**Test last batch is not dropped when perfectly divisible**")
    accelerator = Accelerator()
    test_torch_metrics(accelerator, 512)
    accelerator.state._reset_state()
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main() | 269 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizerTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained("moussaKam/mbarthez")
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 101122)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 101122)

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 57, 3018, 70307, 91, 2]

        batch = self.tokenizer(
            src_text, max_length=len(expected_src_tokens), padding=True, truncation=True, return_tensors="pt"
        )
        self.assertIsInstance(batch, BatchEncoding)

        self.assertEqual((2, 6), batch.input_ids.shape)
        self.assertEqual((2, 6), batch.attention_mask.shape)
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens, result)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="moussaKam/mbarthez",
            revision="c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6",
            sequences=sequences,
        )
| 89 | 0 |
import requests
def send_slack_message(message_body: str, slack_url: str) -> None:
    headers = {"Content-Type": "application/json"}
    response = requests.post(slack_url, json={"text": message_body}, headers=headers)
    if response.status_code != 200:
        msg = (
            "Request to slack returned an error "
            f"{response.status_code}, the response is:\n{response.text}"
        )
        raise ValueError(msg)
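
# A minimal usage sketch (the webhook URL is a placeholder): Slack's
# incoming-webhook endpoint expects a JSON payload of the form {"text": ...},
# which is exactly what `send_slack_message` posts.
#
#   send_slack_message("build finished", "https://hooks.slack.com/services/T000/B000/XXXX")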
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("""<YOUR MESSAGE BODY>""", """<SLACK CHANNEL URL>""")
| 52 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
    @require_torch
    def test_offline_mode(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_no_internet(self):
        # python one-liner segments
        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '

        # Force fetching the files so that we can use the cache
        mname = "hf-internal-testing/tiny-random-bert"
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task="fill-mask", model=mname)

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run, mock])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_sharded_checkpoint(self):
        # this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
        # `transformers` is loaded, and it's too late for inside pytest - so we are changing it
        # while running an external program

        # python one-liner segments

        # this must be loaded before socket.socket is monkey-patched
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # next emulate no network
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]

        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
    @require_torch
    def test_offline_mode_pipeline_exception(self):
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env["TRANSFORMERS_OFFLINE"] = "1"
        cmd = [sys.executable, "-c", "\n".join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        self.assertEqual(result.returncode, 1, result.stderr)
        self.assertIn(
            "You cannot infer task automatically within `pipeline` when using offline mode",
            result.stderr.decode().replace("\n", ""),
        )
    @require_torch
    def test_offline_model_dynamic_model(self):
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '

        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, "-c", "\n".join([load, run])]

        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())

        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env["TRANSFORMERS_OFFLINE"] = "1"
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn("success", result.stdout.decode())
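
# A short usage note on what these tests exercise: setting the environment
# variable before Python starts forces every `from_pretrained` call to resolve
# against the local cache only, e.g. (file name is a placeholder):
#
#   TRANSFORMERS_OFFLINE=1 python my_app.py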
| 89 | 0 |
from __future__ import annotations
def maximum_non_adjacent_sum(nums: list[int]) -> int:
    """
    Find the maximum non-adjacent sum of the integers in the nums input list

    >>> maximum_non_adjacent_sum([1, 2, 3])
    4
    >>> maximum_non_adjacent_sum([1, 5, 3, 7, 2, 2, 6])
    18
    >>> maximum_non_adjacent_sum([-1, -5, -3, -7, -2, -2, -6])
    0
    """
    if not nums:
        return 0
    max_including = nums[0]
    max_excluding = 0
    for num in nums[1:]:
        # Either extend a sum that excluded the previous element, or keep the
        # best sum seen so far and skip the current element.
        max_including, max_excluding = (
            max_excluding + num,
            max(max_including, max_excluding),
        )
    return max(max_including, max_excluding)
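
# A worked trace of the recurrence above (illustrative input): for
# nums = [1, 2, 3], the best non-adjacent choice is 1 + 3 = 4 — the loop moves
# (max_including, max_excluding) through (1, 0) -> (2, 1) -> (4, 2), and the
# final answer is max(4, 2) = 4.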
if __name__ == "__main__":
import doctest
doctest.testmod()
| 219 |
'''simple docstring'''
def triangle_number_generator():
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
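
# Why `count_divisors` works (worked example): the number of divisors of
# n = p1**a1 * p2**a2 * ... is (a1 + 1) * (a2 + 1) * ... For n = 28 = 2**2 * 7,
# that gives (2 + 1) * (1 + 1) = 6 divisors: 1, 2, 4, 7, 14, 28.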
def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
| 89 | 0 |
"""simple docstring"""
import argparse
from torch import nn
# transformers_old should correspond to branch `save_old_prophetnet_model_structure` here
# original prophetnet_checkpoints are saved under `patrickvonplaten/..._old` respectively
from transformers_old.modeling_prophetnet import (
ProphetNetForConditionalGeneration as ProphetNetForConditionalGenerationOld,
)
from transformers_old.modeling_xlm_prophetnet import (
XLMProphetNetForConditionalGeneration as XLMProphetNetForConditionalGenerationOld,
)
from transformers import ProphetNetForConditionalGeneration, XLMProphetNetForConditionalGeneration, logging
logger = logging.get_logger(__name__)
logging.set_verbosity_info()
def convert_prophetnet_checkpoint_to_pytorch(prophetnet_checkpoint_path: str, pytorch_dump_folder_path: str):
    """
    Copy/paste/tweak prophetnet's weights to our prophetnet structure.
    """
    if "xprophetnet" in prophetnet_checkpoint_path:
        prophet_old = XLMProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = XLMProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    else:
        prophet_old = ProphetNetForConditionalGenerationOld.from_pretrained(prophetnet_checkpoint_path)
        prophet, loading_info = ProphetNetForConditionalGeneration.from_pretrained(
            prophetnet_checkpoint_path, output_loading_info=True
        )
    special_keys = ["key_proj", "value_proj", "query_proj"]

    mapping = {
'self_attn': 'ngram_self_attn',
'cross_attn': 'encoder_attn',
'cross_attn_layer_norm': 'encoder_attn_layer_norm',
'feed_forward_layer_norm': 'final_layer_norm',
'feed_forward': '',
'intermediate': 'fc1',
'output': 'fc2',
'key_proj': 'k_proj',
'query_proj': 'q_proj',
'value_proj': 'v_proj',
'word_embeddings': 'embed_tokens',
'embeddings_layer_norm': 'emb_layer_norm',
'relative_pos_embeddings': 'relative_linear',
'ngram_embeddings': 'ngram_input_embed',
'position_embeddings': 'embed_positions',
}
    for key in loading_info["missing_keys"]:
        attributes = key.split(".")

        if attributes[0] == "lm_head":
            model = prophet
            old_model = prophet_old
        else:
            model = prophet.prophetnet
            old_model = prophet_old.model

        is_key_init = False
        for attribute in attributes:
            if attribute in mapping:
                old_attribute = mapping[attribute]
                if not hasattr(old_model, old_attribute) and len(old_attribute) > 0:
                    old_attribute = attribute
            elif hasattr(old_model, attribute):
                old_attribute = attribute

            if attribute == "weight":
                assert old_model.weight.shape == model.weight.shape, "Shapes have to match!"
                model.weight = old_model.weight
                logger.info(f"{attribute} is initialized.")
                is_key_init = True
                break
            elif attribute == "bias":
                assert old_model.bias.shape == model.bias.shape, "Shapes have to match!"
                model.bias = old_model.bias
                logger.info(f"{attribute} is initialized")
                is_key_init = True
                break
            elif attribute in special_keys and hasattr(old_model, "in_proj_weight"):
                embed_dim = old_model.in_proj_weight.shape[0] // 3
                param = getattr(model, attribute)
                assert param.weight.shape == old_model.in_proj_weight[:embed_dim, :].shape, "Shapes have to match"
                assert param.bias.shape == old_model.in_proj_bias[:embed_dim].shape, "Shapes have to match"

                if attribute == "query_proj":
                    model.query_proj.weight = nn.Parameter(old_model.in_proj_weight[:embed_dim, :])
                    model.query_proj.bias = nn.Parameter(old_model.in_proj_bias[:embed_dim])

                elif attribute == "key_proj":
                    model.key_proj.weight = nn.Parameter(old_model.in_proj_weight[embed_dim : 2 * embed_dim, :])
                    model.key_proj.bias = nn.Parameter(old_model.in_proj_bias[embed_dim : 2 * embed_dim])

                elif attribute == "value_proj":
                    model.value_proj.weight = nn.Parameter(old_model.in_proj_weight[2 * embed_dim :, :])
                    model.value_proj.bias = nn.Parameter(old_model.in_proj_bias[2 * embed_dim :])

                is_key_init = True
                break
            elif attribute == "position_embeddings":
                assert (
                    model.position_embeddings.weight.shape[-1] == old_model.embed_positions.weight.shape[-1]
                ), "Hidden size has to match"
                assert model.position_embeddings.weight.shape[0] == 512, "We want 512 position_embeddings."
                model.position_embeddings.weight = nn.Parameter(old_model.embed_positions.weight[:512, :])
                is_key_init = True
                break

            if attribute.isdigit():
                model = model[int(attribute)]
                old_model = old_model[int(attribute)]
            else:
                model = getattr(model, attribute)

                if old_attribute == "":
                    old_model = old_model
                else:
                    if not hasattr(old_model, old_attribute):
                        raise ValueError(f"{old_model} does not have {old_attribute}")
                    old_model = getattr(old_model, old_attribute)

        if not is_key_init:
            raise ValueError(f"{key} was not correctly initialized!")

    print(f"Saving model to {pytorch_dump_folder_path}")
    prophet.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--prophetnet_checkpoint_path', default=None, type=str, required=True, help='Path the official PyTorch dump.'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
    args = parser.parse_args()
convert_prophetnet_checkpoint_to_pytorch(args.prophetnet_checkpoint_path, args.pytorch_dump_folder_path)
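
# Hypothetical invocation (paths and script name are placeholders), mirroring
# the arguments registered above:
#
#   python convert_prophetnet_checkpoint.py \
#       --prophetnet_checkpoint_path /path/to/fairseq_checkpoint \
#       --pytorch_dump_folder_path /path/to/output_dir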
| 255 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows

        return written
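
# A minimal usage sketch (table name and database path are hypothetical): the
# writer streams the Arrow-backed dataset into a SQL table batch by batch.
#
#   import sqlite3
#   from datasets import Dataset
#   ds = Dataset.from_dict({"id": [1, 2], "text": ["a", "b"]})
#   SqlDatasetWriter(ds, name="my_table", con=sqlite3.connect("my.db")).write()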
| 89 | 0 |
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
ControlNetModel,
DDIMScheduler,
    StableDiffusionControlNetImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
from diffusers.utils import floats_tensor, load_image, load_numpy, randn_tensor, slow, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
)
enable_full_determinism()
class ControlNetImg2ImgPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS.union({"control_image"})
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)
        controlnet = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = randn_tensor(
            (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
            generator=generator,
            device=torch.device(device),
        )
        image = floats_tensor(control_image.shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)
class StableDiffusionMultiControlNetPipelineFastTests(
    PipelineTesterMixin, PipelineKarrasSchedulerTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionControlNetImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = frozenset([])  # TO_DO: add image_params once refactored VaeImageProcessor.preprocess

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=4,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
        )
        torch.manual_seed(0)

        def init_weights(m):
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.normal_(m.weight)
                m.bias.data.fill_(1.0)

        controlnet1 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet1.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        controlnet2 = ControlNetModel(
            block_out_channels=(32, 64),
            layers_per_block=2,
            in_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"),
            cross_attention_dim=32,
            conditioning_embedding_out_channels=(16, 32),
        )
        controlnet2.controlnet_down_blocks.apply(init_weights)

        torch.manual_seed(0)
        scheduler = DDIMScheduler(
            beta_start=0.00085,
            beta_end=0.012,
            beta_schedule="scaled_linear",
            clip_sample=False,
            set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        controlnet = MultiControlNetModel([controlnet1, controlnet2])

        components = {
            "unet": unet,
            "controlnet": controlnet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        controlnet_embedder_scale_factor = 2
        control_image = [
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
            randn_tensor(
                (1, 3, 32 * controlnet_embedder_scale_factor, 32 * controlnet_embedder_scale_factor),
                generator=generator,
                device=torch.device(device),
            ),
        ]
        image = floats_tensor(control_image[0].shape, rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
            "image": image,
            "control_image": control_image,
        }
        return inputs

    def test_control_guidance_switch(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)

        scale = 10.0
        steps = 4

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_1 = pipe(**inputs)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_2 = pipe(**inputs, control_guidance_start=0.1, control_guidance_end=0.2)[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_3 = pipe(**inputs, control_guidance_start=[0.1, 0.3], control_guidance_end=[0.2, 0.7])[0]

        inputs = self.get_dummy_inputs(torch_device)
        inputs["num_inference_steps"] = steps
        inputs["controlnet_conditioning_scale"] = scale
        output_4 = pipe(**inputs, control_guidance_start=0.4, control_guidance_end=[0.5, 0.8])[0]

        # make sure that all outputs are different
        assert np.sum(np.abs(output_1 - output_2)) > 1e-3
        assert np.sum(np.abs(output_1 - output_3)) > 1e-3
        assert np.sum(np.abs(output_1 - output_4)) > 1e-3

    def test_attention_slicing_forward_pass(self):
        return self._test_attention_slicing_forward_pass(expected_max_diff=2e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=2e-3)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(expected_max_diff=2e-3)

    def test_save_pretrained_raise_not_implemented_exception(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        with tempfile.TemporaryDirectory() as tmpdir:
            try:
                # save_pretrained is not implemented for Multi-ControlNet
                pipe.save_pretrained(tmpdir)
            except NotImplementedError:
                pass
@slow
@require_torch_gpu
class ControlNetImg2ImgPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_canny(self):
        controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")

        pipe = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5", safety_checker=None, controlnet=controlnet
        )
        pipe.enable_model_cpu_offload()
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        prompt = "evil space-punk bird"
        control_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/bird_canny.png"
        ).resize((512, 512))
        image = load_image(
            "https://huggingface.co/lllyasviel/sd-controlnet-canny/resolve/main/images/bird.png"
        ).resize((512, 512))

        output = pipe(
            prompt,
            image,
            control_image=control_image,
            generator=generator,
            output_type="np",
            num_inference_steps=50,
            strength=0.6,
        )

        image = output.images[0]

        assert image.shape == (512, 512, 3)

        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd_controlnet/img2img.npy"
        )

        assert np.abs(expected_image - image).max() < 9e-2
| 311 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
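
# How the affine transform is determined (illustrative note): the three source
# points in ``pt1`` and their images in ``pt2`` fix the six unknowns of the
# 2x3 matrix M, so that [x', y'] = M @ [x, y, 1] for every pixel. For example:
#
#   pts_src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
#   pts_dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
#   M = cv2.getAffineTransform(pts_src, pts_dst)  # shape (2, 3)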
if __name__ == "__main__":
# read original image
    image = cv2.imread(
str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
)
# turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
plt.title(titles[i])
plt.axis('''off''')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 89 | 0 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    '''Get the rotated image via the affine transform mapping pt1 onto pt2.'''
    rotation_matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cv2.imread(
str(Path(__file__).resolve().parent.parent / 'image_data' / 'lena.jpg')
)
# turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# get image shape
    img_rows, img_cols = gray_img.shape
# set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
# add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
# plot different image rotations
    fig = plt.figure(1)
    titles = ['Original', 'Rotation 1', 'Rotation 2', 'Rotation 3']
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, 'gray')
plt.title(titles[i])
plt.axis('off')
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show() | 152 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
UniSpeechConfig,
UniSpeechForCTC,
UniSpeechForPreTraining,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2PhonemeCTCTokenizer,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''ctc_proj''',
'''mask_emb''': '''masked_spec_embed''',
}
TOP_LEVEL_KEYS = [
'''ctc_proj''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type, is_finetuned):
    for attribute in key.split("."):
        if is_finetuned:
            if attribute in ["quantizer", "project_q", "project_hid"]:
                # those layers are only relevant for pretraining and should be dropped
                return

            if attribute == "ctc_proj":
                # we should rename `ctc_proj` to `lm_head` for fine-tuned phoneme models
                attribute = "lm_head"

        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
        f""" {value.shape} for {full_name}"""
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()

    feature_extractor = hf_model.unispeech.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type, is_finetuned)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"""Unused weights: {unused_weights}""")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"""{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"""
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"""{full_name} has size {value.shape}, but"""
                f""" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."""
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_unispeech_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """
    Copy/paste/tweak model's weights to transformers design.
    """
    if config_path is not None:
        config = UniSpeechConfig.from_pretrained(config_path)
    else:
        config = UniSpeechConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load_from_json(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 42
            vocab_dict["<s>"] = 43
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = Wav2Vec2PhonemeCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_unispeech = UniSpeechForCTC(config)
    else:
        hf_unispeech = UniSpeechForPreTraining(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1]), "w2v_path": checkpoint_path}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_unispeech, is_finetuned)

    hf_unispeech.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
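
# Hypothetical invocation (all paths and the script name are placeholders),
# mirroring the arguments registered above:
#
#   python convert_unispeech_checkpoint.py \
#       --checkpoint_path /path/to/unispeech.pt \
#       --dict_path /path/to/dict.ltr.txt \
#       --pytorch_dump_folder_path /path/to/output_dir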
| 143 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
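
# Example CLI use via `fire` (model name and data path are placeholders); the
# positional arguments map onto `save_len_file`'s parameters:
#
#   python save_len_file.py t5-small /path/to/data_dir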
| 89 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

SWIN2SR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''caidas/swin2sr-classicalsr-x2-64''': (
'''https://huggingface.co/caidas/swin2sr-classicalsr-x2-64/resolve/main/config.json'''
),
}
class Swin2SRConfig(PretrainedConfig):
    model_type = "swin2sr"

    attribute_map = {
        "hidden_size": "embed_dim",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
def __init__( self , UpperCAmelCase=64 , UpperCAmelCase=1 , UpperCAmelCase=3 , UpperCAmelCase=180 , UpperCAmelCase=[6, 6, 6, 6, 6, 6] , UpperCAmelCase=[6, 6, 6, 6, 6, 6] , UpperCAmelCase=8 , UpperCAmelCase=2.0 , UpperCAmelCase=True , UpperCAmelCase=0.0 , UpperCAmelCase=0.0 , UpperCAmelCase=0.1 , UpperCAmelCase="gelu" , UpperCAmelCase=False , UpperCAmelCase=0.02 , UpperCAmelCase=1e-5 , UpperCAmelCase=2 , UpperCAmelCase=1.0 , UpperCAmelCase="1conv" , UpperCAmelCase="pixelshuffle" , **UpperCAmelCase , ):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(_UpperCAmelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = upscale
_UpperCAmelCase = img_range
_UpperCAmelCase = resi_connection
_UpperCAmelCase = upsampler
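# A minimal instantiation sketch for the config above (keyword values shown are just the
# defaults from the signature):
#   config = Swin2SRConfig(upscale=2, upsampler="pixelshuffle")
#   assert config.num_layers == len(config.depths)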
| 39 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    ) if False else None
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
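# Quick demonstration of the API above (illustrative, not part of the original module):
#   ll = LinkedList()
#   for value in (3, 1, 4):
#       ll.insert_tail(value)
#   ll.reverse()
#   assert str(ll) == "4->1->3"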
| 89 | 0 |
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
A : Tuple = open # noqa: we just need to have a builtin inside this module to test it properly
| 274 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples(unittest.TestCase):
    def analyze_directory(
        self,
        directory: Path,
        identifier: Union[str, None] = None,
        n_identifier: Union[List[str], None] = None,
        ignore_files: Union[str, List[str], None] = None,
        only_modules: bool = True,
    ):
        files = [file for file in os.listdir(directory) if os.path.isfile(os.path.join(directory, file))]

        if identifier is not None:
            files = [file for file in files if identifier in file]

        if n_identifier is not None:
            if isinstance(n_identifier, list):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]

        ignore_files = ignore_files or []
        ignore_files.append('__init__.py')
        files = [file for file in files if file not in ignore_files]

        for file in files:
            # Open all files
            print('Testing', file)

            if only_modules:
                module_identifier = file.split('.')[0]
                try:
                    module_identifier = getattr(transformers, module_identifier)
                    suite = doctest.DocTestSuite(module_identifier)
                    result = unittest.TextTestRunner().run(suite)
                    self.assertIs(len(result.failures), 0)
                except AttributeError:
                    logger.info(f"""{module_identifier} is not a module.""")
            else:
                result = doctest.testfile(str(Path('..') / directory / file), optionflags=doctest.ELLIPSIS)
                self.assertIs(result.failed, 0)

    def test_modeling_doctests(self):
        directory = Path('src/transformers')
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory, identifier=identifier, ignore_files=ignore_files)

    def test_tokenization_doctests(self):
        directory = Path('src/transformers')
        identifier = 'tokenization'
        self.analyze_directory(directory, identifier=identifier)

    def test_configuration_doctests(self):
        directory = Path('src/transformers')
        identifier = 'configuration'
        self.analyze_directory(directory, identifier=identifier)

    def test_remaining_doctests(self):
        directory = Path('src/transformers')
        n_identifiers = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory, n_identifier=n_identifiers)

    def test_doc_sources(self):
        directory = Path('docs/source')
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory, ignore_files=ignore_files, only_modules=False)
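# For reference, the non-module branch above boils down to a plain doctest.testfile call
# (a sketch; the documentation path is hypothetical):
#   import doctest
#   result = doctest.testfile("../docs/source/quicktour.md", optionflags=doctest.ELLIPSIS)
#   assert result.failed == 0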
| 89 | 0 |
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"Node({self.data})"


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError("list index out of range.")
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any) -> None:
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any) -> None:
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any) -> None:
        if not 0 <= index <= len(self):
            raise IndexError("list index out of range")
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self) -> None:  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError("List index out of range.")
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self) -> bool:
        return self.head is None

    def reverse(self) -> None:
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)
    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        "dlrow olleH",
        7,
        5555,
        0,
        -192.55555,
        "Hello, world!",
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node("Hello again, world!"))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input("Inserting 1st at head ").strip())
    linked_list.insert_head(input("Inserting 2nd at head ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    linked_list.insert_tail(input("\nInserting 1st at tail ").strip())
    linked_list.insert_tail(input("Inserting 2nd at tail ").strip())
    print("\nPrint list:")
    linked_list.print_list()
    print("\nDelete head")
    linked_list.delete_head()
    print("Delete tail")
    linked_list.delete_tail()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nReverse linked list")
    linked_list.reverse()
    print("\nPrint list:")
    linked_list.print_list()
    print("\nString representation of linked list:")
    print(linked_list)
    print("\nReading/changing Node data using indexing:")
    print(f'Element at Position 1: {linked_list[1]}')
    linked_list[1] = input("Enter New Value: ").strip()
    print("New list:")
    print(linked_list)
    print(f'length of linked_list is : {len(linked_list)}')


if __name__ == "__main__":
    main()
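# Indexing sketch for the list above (illustrative): __iter__ yields node data, so both
# __getitem__ and plain comparisons operate on data, not Node objects:
#   ll = LinkedList()
#   ll.insert_tail("a")
#   ll.insert_tail("b")
#   ll[1] = "c"
#   assert ll[1] == "c"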
| 18 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name: str):
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f""".{module_name}""", 'transformers.models')
            try:
                return getattr(module, class_name)
            except AttributeError:
                continue

    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor, '__name__', None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers')
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None


def get_image_processor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        IMAGE_PROCESSOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.')
        return {}

    with open(resolved_config_file, encoding='utf-8') as reader:
        return json.load(reader)
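# Usage sketch for the helper above (the model id is illustrative):
#   config_dict = get_image_processor_config("google/vit-base-patch16-224")
#   print(config_dict.get("image_processor_type"))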
class AutoImageProcessor:
    def __init__(self):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.')

    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        config = kwargs.pop('config', None)
        trust_remote_code = kwargs.pop('trust_remote_code', None)
        kwargs['_from_auto'] = True

        config_dict, _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path, **kwargs)
        image_processor_class = config_dict.get('image_processor_type', None)
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map', {}):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']

        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type', None)
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.')
                image_processor_class = feature_extractor_class.replace('FeatureExtractor', 'ImageProcessor')
            if "AutoFeatureExtractor" in config_dict.get('auto_map', {}):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor', 'ImageProcessor')
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.')

        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config, 'image_processor_type', None)
            if hasattr(config, 'auto_map') and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']

        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class)

        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code)

        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map, pretrained_model_name_or_path, **kwargs)
            _ = kwargs.pop('code_revision', None)
            if os.path.isdir(pretrained_model_name_or_path):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict, **kwargs)
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config)]
            return image_processor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            f"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            f"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys())}""")

    @staticmethod
    def register(config_class, image_processor_class):
        IMAGE_PROCESSOR_MAPPING.register(config_class, image_processor_class)
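# Typical call into the auto class above (model id illustrative):
#   image_processor = AutoImageProcessor.from_pretrained("google/vit-base-patch16-224")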
| 89 | 0 |
import logging
import os
from typing import Dict, List, Optional, Union
import torch
import torch.nn as nn
from accelerate.utils.imports import (
    is_abit_bnb_available,
    is_bnb_available,
)
from ..big_modeling import dispatch_model, init_empty_weights
from .dataclasses import BnbQuantizationConfig
from .modeling import (
find_tied_parameters,
get_balanced_memory,
infer_auto_device_map,
load_checkpoint_in_model,
offload_weight,
set_module_tensor_to_device,
)
if is_bnb_available():
import bitsandbytes as bnb
from copy import deepcopy
lowercase__ : Dict = logging.getLogger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = None , snake_case__ = False , ) -> Dict:
lowerCAmelCase = bnb_quantization_config.load_in_abit
lowerCAmelCase = bnb_quantization_config.load_in_abit
if load_in_abit and not is_abit_bnb_available():
raise ImportError(
'''You have a version of `bitsandbytes` that is not compatible with 8bit quantization,'''
''' make sure you have the latest version of `bitsandbytes` installed.''' )
if load_in_abit and not is_abit_bnb_available():
raise ValueError(
'''You have a version of `bitsandbytes` that is not compatible with 4bit quantization,'''
'''make sure you have the latest version of `bitsandbytes` installed.''' )
lowerCAmelCase = []
# custom device map
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) and len(device_map.keys() ) > 1:
lowerCAmelCase = [key for key, value in device_map.items() if value in ['disk', 'cpu']]
# We keep some modules such as the lm_head in their original dtype for numerical stability reasons
if bnb_quantization_config.skip_modules is None:
lowerCAmelCase = get_keys_to_not_convert(lowerCAmelCase_ )
# add cpu modules to skip modules only for 4-bit modules
if load_in_abit:
bnb_quantization_config.skip_modules.extend(lowerCAmelCase_ )
lowerCAmelCase = bnb_quantization_config.skip_modules
# We add the modules we want to keep in full precision
if bnb_quantization_config.keep_in_fpaa_modules is None:
lowerCAmelCase = []
lowerCAmelCase = bnb_quantization_config.keep_in_fpaa_modules
modules_to_not_convert.extend(lowerCAmelCase_ )
# compatibility with peft
lowerCAmelCase = load_in_abit
lowerCAmelCase = load_in_abit
lowerCAmelCase = get_parameter_device(lowerCAmelCase_ )
if model_device.type != "meta":
# quantization of an already loaded model
logger.warning(
'''It is not recommended to quantize a loaded model. '''
'''The model should be instantiated under the `init_empty_weights` context manager.''' )
lowerCAmelCase = replace_with_bnb_layers(lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
# convert param to the right dtype
lowerCAmelCase = bnb_quantization_config.torch_dtype
for name, param in model.state_dict().items():
if any(module_to_keep_in_fpaa in name for module_to_keep_in_fpaa in keep_in_fpaa_modules ):
param.to(torch.floataa )
if param.dtype != torch.floataa:
lowerCAmelCase = name.replace('''.weight''' , '''''' ).replace('''.bias''' , '''''' )
lowerCAmelCase = getattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if param is not None:
param.to(torch.floataa )
elif torch.is_floating_point(lowerCAmelCase_ ):
param.to(lowerCAmelCase_ )
if model_device.type == "cuda":
# move everything to cpu in the first place because we can't do quantization if the weights are already on cuda
model.cuda(torch.cuda.current_device() )
torch.cuda.empty_cache()
elif torch.cuda.is_available():
model.to(torch.cuda.current_device() )
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
logger.info(
f"The model device type is {model_device.type}. However, cuda is needed for quantization."
'''We move the model to cuda.''' )
return model
elif weights_location is None:
raise RuntimeError(
f"`weights_location` needs to be the folder path containing the weights of the model, but we found {weights_location} " )
else:
with init_empty_weights():
lowerCAmelCase = replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , modules_to_not_convert=lowerCAmelCase_ )
lowerCAmelCase = get_quantized_model_device_map(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , max_memory=lowerCAmelCase_ , no_split_module_classes=lowerCAmelCase_ , )
if offload_state_dict is None and device_map is not None and "disk" in device_map.values():
lowerCAmelCase = True
lowerCAmelCase = any(x in list(device_map.values() ) for x in ['''cpu''', '''disk'''] )
load_checkpoint_in_model(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , dtype=bnb_quantization_config.torch_dtype , offload_folder=lowerCAmelCase_ , offload_state_dict=lowerCAmelCase_ , keep_in_fpaa_modules=bnb_quantization_config.keep_in_fpaa_modules , offload_abit_bnb=load_in_abit and offload , )
return dispatch_model(lowerCAmelCase_ , device_map=lowerCAmelCase_ , offload_dir=lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , snake_case__=None ) -> Optional[int]:
if device_map is None:
if torch.cuda.is_available():
lowerCAmelCase = {'': torch.cuda.current_device()}
else:
raise RuntimeError('''No GPU found. A GPU is needed for quantization.''' )
        logger.info('''The device_map was not initialized. ''' '''Setting device_map to `{\'\':torch.cuda.current_device()}`.''' )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
if device_map not in ["auto", "balanced", "balanced_low_0", "sequential"]:
raise ValueError(
'''If passing a string for `device_map`, please choose \'auto\', \'balanced\', \'balanced_low_0\' or '''
'''\'sequential\'.''' )
lowerCAmelCase = {}
special_dtypes.update(
{
name: bnb_quantization_config.torch_dtype
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.skip_modules )
} )
special_dtypes.update(
{
name: torch.floataa
for name, _ in model.named_parameters()
if any(m in name for m in bnb_quantization_config.keep_in_fpaa_modules )
} )
lowerCAmelCase = {}
lowerCAmelCase = special_dtypes
lowerCAmelCase = no_split_module_classes
lowerCAmelCase = bnb_quantization_config.target_dtype
# get max_memory for each device.
if device_map != "sequential":
lowerCAmelCase = get_balanced_memory(
lowerCAmelCase_ , low_zero=(device_map == '''balanced_low_0''') , max_memory=lowerCAmelCase_ , **lowerCAmelCase_ , )
lowerCAmelCase = max_memory
lowerCAmelCase = infer_auto_device_map(lowerCAmelCase_ , **lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
# check if don't have any quantized module on the cpu
lowerCAmelCase = bnb_quantization_config.skip_modules + bnb_quantization_config.keep_in_fpaa_modules
lowerCAmelCase = {
key: device_map[key] for key in device_map.keys() if key not in modules_not_to_convert
}
for device in ["cpu", "disk"]:
if device in device_map_without_some_modules.values():
if bnb_quantization_config.load_in_abit:
raise ValueError(
'''\n Some modules are dispatched on the CPU or the disk. Make sure you have enough GPU RAM to fit\n the quantized model. If you want to dispatch the model on the CPU or the disk while keeping\n these modules in `torch_dtype`, you need to pass a custom `device_map` to\n `load_and_quantize_model`. Check\n https://huggingface.co/docs/accelerate/main/en/usage_guides/quantization#offload-modules-to-cpu-and-disk\n for more details.\n ''' )
else:
logger.info(
                    '''Some modules are offloaded to the CPU or the disk. Note that these modules will be converted to 8-bit''' )
del device_map_without_some_modules
return device_map
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None ) -> Tuple:
if modules_to_not_convert is None:
lowerCAmelCase = []
lowerCAmelCase = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
if not has_been_replaced:
logger.warning(
'''You are loading your model in 8bit or 4bit but no linear modules were found in your model.'''
''' this can happen for some architectures such as gpt2 that uses Conv1D instead of Linear layers.'''
''' Please double check your model architecture, or submit an issue on github if you think this is'''
''' a bug.''' )
return model
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=None , snake_case__=None , ) -> List[Any]:
lowerCAmelCase = False
for name, module in model.named_children():
if current_key_name is None:
lowerCAmelCase = []
current_key_name.append(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , nn.Linear ) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
lowerCAmelCase = '.'.join(lowerCAmelCase_ )
lowerCAmelCase = True
for key in modules_to_not_convert:
if (
(key in current_key_name_str) and (key + "." in current_key_name_str)
) or key == current_key_name_str:
lowerCAmelCase = False
break
if proceed:
# Load bnb module with empty weight and replace ``nn.Linear` module
if bnb_quantization_config.load_in_abit:
lowerCAmelCase = bnb.nn.LinearabitLt(
module.in_features , module.out_features , module.bias is not None , has_fpaa_weights=lowerCAmelCase_ , threshold=bnb_quantization_config.llm_inta_threshold , )
elif bnb_quantization_config.load_in_abit:
lowerCAmelCase = bnb.nn.Linearabit(
module.in_features , module.out_features , module.bias is not None , bnb_quantization_config.bnb_abit_compute_dtype , compress_statistics=bnb_quantization_config.bnb_abit_use_double_quant , quant_type=bnb_quantization_config.bnb_abit_quant_type , )
else:
raise ValueError('''load_in_8bit and load_in_4bit can\'t be both False''' )
lowerCAmelCase = module.weight.data
if module.bias is not None:
lowerCAmelCase = module.bias.data
bnb_module.requires_grad_(lowerCAmelCase_ )
setattr(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase = True
if len(list(module.children() ) ) > 0:
lowerCAmelCase = _replace_with_bnb_layers(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
lowerCAmelCase = has_been_replaced | _has_been_replaced
# Remove the last key for recursion
current_key_name.pop(-1 )
return model, has_been_replaced
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> int:
# Create a copy of the model
with init_empty_weights():
lowerCAmelCase = deepcopy(lowerCAmelCase_ ) # this has 0 cost since it is done inside `init_empty_weights` context manager`
lowerCAmelCase = find_tied_parameters(lowerCAmelCase_ )
# For compatibility with Accelerate < 0.18
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
lowerCAmelCase = sum(list(tied_params.values() ) , [] ) + list(tied_params.keys() )
else:
lowerCAmelCase = sum(lowerCAmelCase_ , [] )
lowerCAmelCase = len(lowerCAmelCase_ ) > 0
# Check if it is a base model
lowerCAmelCase = False
if hasattr(lowerCAmelCase_ , '''base_model_prefix''' ):
lowerCAmelCase = not hasattr(lowerCAmelCase_ , model.base_model_prefix )
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
lowerCAmelCase = list(model.named_children() )
lowerCAmelCase = [list_modules[-1][0]]
# add last module together with tied weights
lowerCAmelCase = set(lowerCAmelCase_ ) - set(lowerCAmelCase_ )
lowerCAmelCase = list(set(lowerCAmelCase_ ) ) + list(lowerCAmelCase_ )
# remove ".weight" from the keys
lowerCAmelCase = ['.weight', '.bias']
lowerCAmelCase = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
lowerCAmelCase = name.replace(lowerCAmelCase_ , '''''' )
filtered_module_names.append(lowerCAmelCase_ )
return filtered_module_names
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
for m in model.modules():
if isinstance(lowerCAmelCase_ , bnb.nn.Linearabit ):
return True
return False
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
return next(parameter.parameters() ).device
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ , snake_case__ ) -> List[Any]:
# if it is not quantized, we quantize and offload the quantized weights and the SCB stats
if fpaa_statistics is None:
set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , 0 , dtype=lowerCAmelCase_ , value=lowerCAmelCase_ )
lowerCAmelCase = param_name
lowerCAmelCase = model
if "." in tensor_name:
lowerCAmelCase = tensor_name.split('''.''' )
for split in splits[:-1]:
lowerCAmelCase = getattr(lowerCAmelCase_ , lowerCAmelCase_ )
if new_module is None:
raise ValueError(f"{module} has no attribute {split}." )
lowerCAmelCase = new_module
lowerCAmelCase = splits[-1]
# offload weights
lowerCAmelCase = False
offload_weight(module._parameters[tensor_name] , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
if hasattr(module._parameters[tensor_name] , '''SCB''' ):
offload_weight(
module._parameters[tensor_name].SCB , param_name.replace('''weight''' , '''SCB''' ) , lowerCAmelCase_ , index=lowerCAmelCase_ , )
else:
offload_weight(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , index=lowerCAmelCase_ )
offload_weight(lowerCAmelCase_ , param_name.replace('''weight''' , '''SCB''' ) , lowerCAmelCase_ , index=lowerCAmelCase_ )
set_module_tensor_to_device(lowerCAmelCase_ , lowerCAmelCase_ , '''meta''' , dtype=lowerCAmelCase_ , value=torch.empty(*param.size() ) )
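# Intended entry point of this module, sketched with the names of accelerate's public API,
# from which this row's code is derived (treat the exact signature as an assumption):
#   bnb_config = BnbQuantizationConfig(load_in_8bit=True)
#   with init_empty_weights():
#       empty_model = MyModel()  # hypothetical model class
#   model = load_and_quantize_model(empty_model, bnb_config, weights_location="/path/to/weights")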
| 338 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
    _type: str = field(default='Image', init=False, repr=False)
def __call__( self : Union[str, Any] ):
return self.pa_type
    def encode_example(self, value: Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"]) -> dict:
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.')

        if isinstance(value, list):
            value = np.array(value)

        if isinstance(value, str):
            return {"path": value, "bytes": None}
        elif isinstance(value, bytes):
            return {"path": None, "bytes": value}
        elif isinstance(value, np.ndarray):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value)
        elif isinstance(value, PIL.Image.Image):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value)
        elif value.get('path') is not None and os.path.isfile(value['path']):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path')}
        elif value.get('bytes') is not None or value.get('path') is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes'), "path": value.get('path')}
        else:
            raise ValueError(
                f"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""")
    def decode_example(self, value: dict, token_per_repo_id=None) -> "PIL.Image.Image":
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.')

        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.')

        if token_per_repo_id is None:
            token_per_repo_id = {}

        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(f"""An image should have one of 'path' or 'bytes' but both are None in {value}.""")
            else:
                if is_local_path(path):
                    image = PIL.Image.open(path)
                else:
                    source_url = path.split('::')[-1]
                    try:
                        repo_id = string_to_dict(source_url, config.HUB_DATASETS_URL)['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id)
                    except ValueError:
                        use_auth_token = None
                    with xopen(path, 'rb', use_auth_token=use_auth_token) as f:
                        bytes_ = BytesIO(f.read())
                    image = PIL.Image.open(bytes_)
        else:
            image = PIL.Image.open(BytesIO(bytes_))
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
    def cast_storage(self, storage: Union[pa.StringArray, pa.StructArray, pa.ListArray]) -> pa.StructArray:
        if pa.types.is_string(storage.type):
            bytes_array = pa.array([None] * len(storage), type=pa.binary())
            storage = pa.StructArray.from_arrays([bytes_array, storage], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_binary(storage.type):
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([storage, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_struct(storage.type):
            if storage.type.get_field_index('bytes') >= 0:
                bytes_array = storage.field('bytes')
            else:
                bytes_array = pa.array([None] * len(storage), type=pa.binary())
            if storage.type.get_field_index('path') >= 0:
                path_array = storage.field('path')
            else:
                path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=storage.is_null())
        elif pa.types.is_list(storage.type):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr))['bytes'] if arr is not None else None for arr in storage.to_pylist()],
                type=pa.binary(),
            )
            path_array = pa.array([None] * len(storage), type=pa.string())
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
    def embed_storage(self, storage: pa.StructArray) -> pa.StructArray:
        @no_op_if_value_is_null
        def path_to_bytes(path):
            with xopen(path, 'rb') as f:
                bytes_ = f.read()
            return bytes_

        bytes_array = pa.array(
            [
                (path_to_bytes(x['path']) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ],
            type=pa.binary(),
        )
        path_array = pa.array(
            [os.path.basename(path) if path is not None else None for path in storage.field('path').to_pylist()],
            type=pa.string(),
        )
        storage = pa.StructArray.from_arrays([bytes_array, path_array], ['bytes', 'path'], mask=bytes_array.is_null())
        return array_cast(storage, self.pa_type)
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys()) & set(PIL.Image.SAVE.keys()))
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image") -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer, format=format)
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image") -> dict:
    if hasattr(image, 'filename') and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image)}
def encode_np_array(array: np.ndarray) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None

    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1')
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""")
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize)
            dest_dtype = np.dtype(dest_dtype_str)
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""")
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""")

    image = PIL.Image.fromarray(array.astype(dest_dtype))
    return {"path": None, "bytes": image_to_bytes(image)}
def objects_to_list_of_image_dicts(objs) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.')

    if objs:
        _, obj = first_non_null_value(objs)
        if isinstance(obj, str):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj, np.ndarray):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array)
            return [obj_to_image_dict_func(obj) for obj in objs]
        elif isinstance(obj, PIL.Image.Image):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image)
            return [obj_to_image_dict_func(obj) for obj in objs]
        else:
            return objs
    else:
        return objs
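# Encode/decode round-trip sketch for the feature above (illustrative):
#   feat = Image()
#   encoded = feat.encode_example(np.zeros((4, 4, 3), dtype=np.uint8))
#   pil_image = feat.decode_example(encoded)  # returns a PIL.Image.Image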
| 89 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_mctct': ['MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MCTCTConfig'],
'feature_extraction_mctct': ['MCTCTFeatureExtractor'],
'processing_mctct': ['MCTCTProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mctct'] = [
'MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MCTCTForCTC',
'MCTCTModel',
'MCTCTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
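# Call-chain sketch for the pipeline above (3 variables, minterms chosen arbitrarily;
# outputs follow the logic preserved verbatim above):
#   binary = decimal_to_binary(3, [0.0, 1.0, 2.0, 5.0])
#   prime_implicants = check(binary)
#   chart = prime_implicant_chart(prime_implicants, binary)
#   essential = selection(chart, prime_implicants)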
| 89 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class GLPNImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size_divisor=32,
        do_rescale=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size_divisor = size_divisor
        self.do_rescale = do_rescale

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size_divisor": self.size_divisor,
            "do_rescale": self.do_rescale,
        }


@require_torch
@require_vision
class GLPNImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = GLPNImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = GLPNImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size_divisor"))
        self.assertTrue(hasattr(image_processing, "resample"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_numpy(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)

    def test_call_pytorch(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input (GLPNImageProcessor doesn't support batching)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0)
        self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0)
| 52 |
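The repeated modulo assertions in the GLPN tests above all verify one property: after preprocessing, both spatial dimensions are multiples of size_divisor. Isolated, with floor rounding as an illustrative assumption (the real processor's rounding rule may differ):

def round_down_to_divisor(height: int, width: int, size_divisor: int = 32) -> tuple[int, int]:
    # Round each dimension down to the nearest multiple of size_divisor.
    return (height // size_divisor) * size_divisor, (width // size_divisor) * size_divisor

new_h, new_w = round_down_to_divisor(400, 391)
assert new_h % 32 == 0 and new_w % 32 == 0  # -> 384, 384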
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
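The CpmAnt __init__.py above (and the Beit and Lilt files later in this dump) all follow the same lazy-import pattern; reduced to its essentials it looks roughly like the sketch below, which is an illustrative stand-in rather than the transformers implementation:

import importlib
import types


class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map every exported attribute to the submodule that defines it.
        self._attr_to_module = {
            attr: module for module, attrs in import_structure.items() for attr in attrs
        }
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr):
        # Only reached when `attr` is not set yet: import on first access.
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        submodule = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        value = getattr(submodule, attr)
        setattr(self, attr, value)  # cache so later lookups skip __getattr__
        return value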
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
def __init__( self : Union[str, Any] , _lowercase : Dict , _lowercase : Tuple=2 , _lowercase : Optional[int]=32 , _lowercase : Any=16 , _lowercase : Tuple=3 , _lowercase : Any=True , _lowercase : List[Any]=True , _lowercase : Union[str, Any]=32 , _lowercase : Optional[int]=4 , _lowercase : str=[0, 1, 2, 3] , _lowercase : Optional[Any]=4 , _lowercase : int=37 , _lowercase : int="gelu" , _lowercase : Tuple=0.1 , _lowercase : Dict=0.1 , _lowercase : List[Any]=0.02 , _lowercase : str=3 , _lowercase : Dict=[1, 3_84, 24, 24] , _lowercase : str=True , _lowercase : Any=None , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = parent
SCREAMING_SNAKE_CASE__ = batch_size
SCREAMING_SNAKE_CASE__ = image_size
SCREAMING_SNAKE_CASE__ = patch_size
SCREAMING_SNAKE_CASE__ = num_channels
SCREAMING_SNAKE_CASE__ = is_training
SCREAMING_SNAKE_CASE__ = use_labels
SCREAMING_SNAKE_CASE__ = hidden_size
SCREAMING_SNAKE_CASE__ = num_hidden_layers
SCREAMING_SNAKE_CASE__ = backbone_out_indices
SCREAMING_SNAKE_CASE__ = num_attention_heads
SCREAMING_SNAKE_CASE__ = intermediate_size
SCREAMING_SNAKE_CASE__ = hidden_act
SCREAMING_SNAKE_CASE__ = hidden_dropout_prob
SCREAMING_SNAKE_CASE__ = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE__ = initializer_range
SCREAMING_SNAKE_CASE__ = num_labels
SCREAMING_SNAKE_CASE__ = backbone_featmap_shape
SCREAMING_SNAKE_CASE__ = scope
SCREAMING_SNAKE_CASE__ = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
SCREAMING_SNAKE_CASE__ = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE__ = num_patches + 1
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ = None
if self.use_labels:
SCREAMING_SNAKE_CASE__ = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ = self.get_config()
return config, pixel_values, labels
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {
'global_padding': 'same',
'layer_type': 'bottleneck',
'depths': [3, 4, 9],
'out_features': ['stage1', 'stage2', 'stage3'],
'embedding_dynamic_padding': True,
'hidden_sizes': [96, 1_92, 3_84, 7_68],
'num_groups': 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=_UpperCAmelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def __a ( self : Any , _lowercase : str , _lowercase : Union[str, Any] , _lowercase : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = DPTModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __a ( self : str , _lowercase : Dict , _lowercase : List[str] , _lowercase : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = DPTForDepthEstimation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(_UpperCAmelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def __a ( self : str , _lowercase : str , _lowercase : str , _lowercase : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.num_labels
SCREAMING_SNAKE_CASE__ = DPTForSemanticSegmentation(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ = model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ = config_and_inputs
SCREAMING_SNAKE_CASE__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase_ = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
lowerCAmelCase_ = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ = False
lowerCAmelCase_ = False
lowerCAmelCase_ = False
    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
def __a ( self : Dict ):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def __a ( self : List[Any] ):
"""simple docstring"""
pass
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_UpperCAmelCase , nn.Linear ) )
def __a ( self : Dict ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ = ['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*_UpperCAmelCase )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_UpperCAmelCase )
def __a ( self : str ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = True
if model_class in get_values(_UpperCAmelCase ):
continue
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**_UpperCAmelCase ).loss
loss.backward()
def __a ( self : Optional[Any] ):
"""simple docstring"""
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = True
if model_class in get_values(_UpperCAmelCase ) or not model_class.supports_gradient_checkpointing:
continue
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE__ = self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model(**_UpperCAmelCase ).loss
loss.backward()
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = _config_zero_init(_UpperCAmelCase )
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(config=_UpperCAmelCase )
# Skip the check for the backbone
SCREAMING_SNAKE_CASE__ = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
SCREAMING_SNAKE_CASE__ = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __a ( self : Tuple ):
"""simple docstring"""
pass
@slow
def __a ( self : Tuple ):
"""simple docstring"""
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
SCREAMING_SNAKE_CASE__ = DPTModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE__ = 'add'
with self.assertRaises(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ = DPTForDepthEstimation(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
SCREAMING_SNAKE_CASE__ = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = prepare_img()
SCREAMING_SNAKE_CASE__ = image_processor(images=_UpperCAmelCase , return_tensors="""pt""" ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ = model(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = outputs.predicted_depth
# verify the predicted depth
SCREAMING_SNAKE_CASE__ = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.tensor(
[[[5.64_37, 5.61_46, 5.65_11], [5.43_71, 5.56_49, 5.59_58], [5.52_15, 5.51_84, 5.52_93]]] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , _UpperCAmelCase , atol=1E-4 ) )
| 219 |
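Outside the test harness, the DPT integration test above corresponds to the following inference flow; this is a sketch using the public transformers API, and the final interpolation is a common post-processing choice rather than something the test checks:

import torch
from PIL import Image
from transformers import DPTForDepthEstimation, DPTImageProcessor

processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas")

image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    predicted_depth = model(**inputs).predicted_depth  # shape (1, 384, 384)

# Upsample to the original resolution for visualisation.
depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1), size=image.size[::-1], mode="bicubic", align_corners=False
).squeeze()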
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_roundtrip(self):
        pass
| 89 | 0 |
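The vocab fixture above is enough to exercise the WordPiece round trip outside the test harness; ids follow the fixture ordering ("un" is index 7, "##want" is 4, and so on):

import os
import tempfile

from transformers import LayoutLMTokenizer

vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa",
                "un", "runn", "##ing", ",", "low", "lowest"]
with tempfile.TemporaryDirectory() as tmp:
    vocab_file = os.path.join(tmp, "vocab.txt")
    with open(vocab_file, "w", encoding="utf-8") as f:
        f.write("".join(t + "\n" for t in vocab_tokens))

    tokenizer = LayoutLMTokenizer(vocab_file)
    tokens = tokenizer.tokenize("UNwant\u00E9d,running")
    assert tokens == ["un", "##want", "##ed", ",", "runn", "##ing"]
    assert tokenizer.convert_tokens_to_ids(tokens) == [7, 4, 5, 10, 8, 9]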
"""simple docstring"""
import torch
from transformers import CamembertForMaskedLM, CamembertTokenizer
def fill_mask(masked_input, model, tokenizer, topk=5):
    # There must be exactly one <mask> token in the input.
    assert masked_input.count("<mask>") == 1
    input_ids = torch.tensor(tokenizer.encode(masked_input, add_special_tokens=True)).unsqueeze(0)  # Batch size 1
    logits = model(input_ids)[0]  # The last hidden-state is the first element of the output tuple
    masked_index = (input_ids.squeeze() == tokenizer.mask_token_id).nonzero().item()
    logits = logits[0, masked_index, :]
    prob = logits.softmax(dim=0)
    values, indices = prob.topk(k=topk, dim=0)
    topk_predicted_token_bpe = " ".join(
        [tokenizer.convert_ids_to_tokens(indices[i].item()) for i in range(len(indices))]
    )
    masked_token = tokenizer.mask_token
    topk_filled_outputs = []
    for index, predicted_token_bpe in enumerate(topk_predicted_token_bpe.split(" ")):
        predicted_token = predicted_token_bpe.replace("\u2581", " ")
        if " {0}".format(masked_token) in masked_input:
            topk_filled_outputs.append(
                (
                    masked_input.replace(" {0}".format(masked_token), predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
        else:
            topk_filled_outputs.append(
                (
                    masked_input.replace(masked_token, predicted_token),
                    values[index].item(),
                    predicted_token,
                )
            )
    return topk_filled_outputs


tokenizer = CamembertTokenizer.from_pretrained("camembert-base")
model = CamembertForMaskedLM.from_pretrained("camembert-base")
model.eval()

masked_input = "Le camembert est <mask> :)"
print(fill_mask(masked_input, model, tokenizer, topk=3))
| 255 |
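The script above spells out what the fill-mask pipeline does internally; the same query through the high-level API looks like this (the top_k argument name assumes a reasonably recent transformers release -- older ones spelled it topk):

from transformers import pipeline

fill = pipeline("fill-mask", model="camembert-base", tokenizer="camembert-base")
for prediction in fill("Le camembert est <mask> :)", top_k=3):
    print(prediction["sequence"], prediction["score"])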
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(self, use_timm_backbone=True, backbone_config=None, num_channels=3, num_queries=300, encoder_layers=6, encoder_ffn_dim=2048, encoder_attention_heads=8, decoder_layers=6, decoder_ffn_dim=2048, decoder_attention_heads=8, encoder_layerdrop=0.0, decoder_layerdrop=0.0, is_encoder_decoder=True, activation_function="relu", d_model=256, dropout=0.1, attention_dropout=0.0, activation_dropout=0.0, init_std=0.02, init_xavier_std=1.0, auxiliary_loss=False, position_embedding_type="sine", backbone="resnet50", use_pretrained_backbone=True, dilation=False, class_cost=2, bbox_cost=5, giou_cost=2, mask_loss_coefficient=1, dice_loss_coefficient=1, cls_loss_coefficient=2, bbox_loss_coefficient=5, giou_loss_coefficient=2, focal_alpha=0.25, **kwargs):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
| 89 | 0 |
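The attribute_map in the config above is what lets callers read config.hidden_size even though the stored field is d_model; a short sketch of that aliasing, using ConditionalDetrConfig, the public transformers name for this class:

from transformers import ConditionalDetrConfig

config = ConditionalDetrConfig(d_model=256, encoder_attention_heads=8)
assert config.hidden_size == 256          # resolved to d_model
assert config.num_attention_heads == 8    # resolved to encoder_attention_heads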
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
a : Union[str, Any] = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = ["BeitFeatureExtractor"]
a : Optional[Any] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Optional[int] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
a : Union[str, Any] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 311 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
    def setUp(self):
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89 | 0 |
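A sketch of the backbone contract the ConvNextV2 tests above assert: out_features selects which stages are exposed as feature maps, and channels mirrors the corresponding hidden sizes (class names below are the public transformers spellings of the obfuscated names above):

import torch
from transformers import ConvNextV2Backbone, ConvNextV2Config

config = ConvNextV2Config(out_features=["stage2", "stage3", "stage4"])
backbone = ConvNextV2Backbone(config)

outputs = backbone(torch.randn(1, 3, 224, 224))
assert len(outputs.feature_maps) == len(config.out_features)
assert list(backbone.channels) == config.hidden_sizes[1:]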
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class ConvNextVaModelTester:
def __init__( self : List[str] , __lowercase : List[Any] , __lowercase : List[str]=13 , __lowercase : Any=32 , __lowercase : Union[str, Any]=3 , __lowercase : Optional[int]=4 , __lowercase : Optional[Any]=[10, 20, 30, 40] , __lowercase : Tuple=[2, 2, 3, 2] , __lowercase : Optional[int]=True , __lowercase : Optional[int]=True , __lowercase : Union[str, Any]=37 , __lowercase : Optional[int]="gelu" , __lowercase : Optional[Any]=10 , __lowercase : Tuple=0.02 , __lowercase : Any=["stage2", "stage3", "stage4"] , __lowercase : Any=[2, 3, 4] , __lowercase : Tuple=None , ) -> Optional[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =parent
SCREAMING_SNAKE_CASE__ : List[Any] =batch_size
SCREAMING_SNAKE_CASE__ : str =image_size
SCREAMING_SNAKE_CASE__ : Union[str, Any] =num_channels
SCREAMING_SNAKE_CASE__ : List[Any] =num_stages
SCREAMING_SNAKE_CASE__ : Dict =hidden_sizes
SCREAMING_SNAKE_CASE__ : int =depths
SCREAMING_SNAKE_CASE__ : Tuple =is_training
SCREAMING_SNAKE_CASE__ : List[str] =use_labels
SCREAMING_SNAKE_CASE__ : Dict =intermediate_size
SCREAMING_SNAKE_CASE__ : int =hidden_act
SCREAMING_SNAKE_CASE__ : int =num_labels
SCREAMING_SNAKE_CASE__ : Any =initializer_range
SCREAMING_SNAKE_CASE__ : Tuple =out_features
SCREAMING_SNAKE_CASE__ : int =out_indices
SCREAMING_SNAKE_CASE__ : List[Any] =scope
def __magic_name__ ( self : Dict ) -> str:
SCREAMING_SNAKE_CASE__ : List[Any] =floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] =None
if self.use_labels:
SCREAMING_SNAKE_CASE__ : Tuple =ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE__ : str =self.get_config()
return config, pixel_values, labels
def __magic_name__ ( self : Any ) -> Dict:
return ConvNextVaConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=_UpperCAmelCase , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
def __magic_name__ ( self : Tuple , __lowercase : Any , __lowercase : Any , __lowercase : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[Any] =ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Any =model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __magic_name__ ( self : Tuple , __lowercase : Union[str, Any] , __lowercase : List[Any] , __lowercase : int ) -> Any:
SCREAMING_SNAKE_CASE__ : List[Any] =ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(_UpperCAmelCase , labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __magic_name__ ( self : str , __lowercase : List[Any] , __lowercase : str , __lowercase : Optional[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : Optional[int] =ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : Dict =model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
SCREAMING_SNAKE_CASE__ : Tuple =None
SCREAMING_SNAKE_CASE__ : List[Any] =ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
SCREAMING_SNAKE_CASE__ : List[str] =model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def __magic_name__ ( self : Optional[Any] ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] =config_and_inputs
SCREAMING_SNAKE_CASE__ : Any ={'pixel_values': pixel_values}
return config, inputs_dict
def __magic_name__ ( self : str ) -> int:
SCREAMING_SNAKE_CASE__ : Tuple =self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE__ : Tuple =config_and_inputs
SCREAMING_SNAKE_CASE__ : List[Any] ={'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class ConvNextVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
snake_case_ = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
snake_case_ = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
snake_case_ = False
    def setUp(self) -> None:
        self.model_tester = ConvNextVaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextVaConfig, has_text_modality=False, hidden_size=37)
def __magic_name__ ( self : Optional[Any] ) -> Optional[int]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __magic_name__ ( self : str ) -> str:
return
@unittest.skip(reason='''ConvNextV2 does not use inputs_embeds''' )
def __magic_name__ ( self : List[Any] ) -> Optional[Any]:
pass
@unittest.skip(reason='''ConvNextV2 does not support input and output embeddings''' )
def __magic_name__ ( self : Optional[int] ) -> Union[str, Any]:
pass
@unittest.skip(reason='''ConvNextV2 does not use feedforward chunking''' )
def __magic_name__ ( self : Any ) -> Union[str, Any]:
pass
def __magic_name__ ( self : List[str] ) -> str:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE__ : Any =True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
SCREAMING_SNAKE_CASE__ : Optional[Any] =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
SCREAMING_SNAKE_CASE__ : str =self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =model(**_UpperCAmelCase ).loss
loss.backward()
def __magic_name__ ( self : str ) -> Tuple:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs_with_labels()
SCREAMING_SNAKE_CASE__ : Optional[int] =False
SCREAMING_SNAKE_CASE__ : Tuple =True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
SCREAMING_SNAKE_CASE__ : Tuple =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
SCREAMING_SNAKE_CASE__ : Any =self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase , return_labels=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] =model(**_UpperCAmelCase ).loss
loss.backward()
def __magic_name__ ( self : List[Any] ) -> int:
SCREAMING_SNAKE_CASE__ : List[Any] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int =model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE__ : Dict =[*signature.parameters.keys()]
SCREAMING_SNAKE_CASE__ : int =['pixel_values']
self.assertListEqual(arg_names[:1] , _UpperCAmelCase )
def __magic_name__ ( self : int ) -> Any:
SCREAMING_SNAKE_CASE__ : Dict =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __magic_name__ ( self : Any ) -> Any:
def check_hidden_states_output(__lowercase : List[Any] , __lowercase : Tuple , __lowercase : Dict ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : List[Any] =model(**self._prepare_for_class(_UpperCAmelCase , _UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Any =outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
SCREAMING_SNAKE_CASE__ : str =self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) , expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
SCREAMING_SNAKE_CASE__ : List[str] =self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE__ : int =True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE__ : Optional[Any] =True
check_hidden_states_output(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase )
def __magic_name__ ( self : List[Any] ) -> List[Any]:
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __magic_name__ ( self : int ) -> Union[str, Any]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE__ : Any =ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ConvNextVaModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
SCREAMING_SNAKE_CASE__ : List[str] =ConvNextVaForImageClassification.from_pretrained('''facebook/convnextv2-tiny-1k-224''' ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.default_image_processor
SCREAMING_SNAKE_CASE__ : str =prepare_img()
SCREAMING_SNAKE_CASE__ : str =preprocessor(images=_UpperCAmelCase , return_tensors='''pt''' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE__ : Dict =model(**_UpperCAmelCase )
# verify the logits
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =torch.tensor([0.9996, 0.1966, -0.4386] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _UpperCAmelCase , atol=1e-4 ) ) | 152 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase__ : Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase__ : Any = '''https://openaipublic.azureedge.net/jukebox/models/'''
lowerCAmelCase__ : Optional[int] = {
'''jukebox-1b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''1b_lyrics/prior_level_2.pth.tar''',
],
'''jukebox-5b-lyrics''': [
'''5b/vqvae.pth.tar''',
'''5b/prior_level_0.pth.tar''',
'''5b/prior_level_1.pth.tar''',
'''5b_lyrics/prior_level_2.pth.tar''',
],
}
def replace_key(key):
    if key.endswith(".model.1.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.1.bias", ".conv1d_1.bias")
    elif key.endswith(".model.1.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.1.weight", ".conv1d_1.weight")
    elif key.endswith(".model.3.bias") and len(key.split(".")) > 10:
        key = key.replace(".model.3.bias", ".conv1d_2.bias")
    elif key.endswith(".model.3.weight") and len(key.split(".")) > 10:
        key = key.replace(".model.3.weight", ".conv1d_2.weight")

    if "conditioner_blocks.0." in key:
        key = key.replace("conditioner_blocks.0", "conditioner_blocks")

    if "prime_prior" in key:
        key = key.replace("prime_prior", "encoder")

    if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
        key = key.replace(".emb.", ".")

    if key.endswith("k"):  # replace vqvae.X.k with vqvae.X.codebook
        return key.replace(".k", ".codebook")
    if "y_emb." in key:
        return key.replace("y_emb.", "metadata_embedding.")
    if "x_emb.emb." in key:
        key = key.replace("0.x_emb.emb", "embed_tokens")
    if "prime_state_ln" in key:
        return key.replace("prime_state_ln", "encoder.final_layer_norm")
    if ".ln" in key:
        return key.replace(".ln", ".layer_norm")
    if "_ln" in key:
        return key.replace("_ln", "_layer_norm")
    if "prime_state_proj" in key:
        return key.replace("prime_state_proj", "encoder.proj_in")
    if "prime_x_out" in key:
        return key.replace("prime_x_out", "encoder.lm_head")
    if "prior.x_out" in key:
        return key.replace("x_out", "fc_proj_out")
    if "x_emb" in key:
        return key.replace("x_emb", "embed_tokens")

    return key
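# Illustrative examples of replace_key behaviour. The keys below are
# hypothetical strings shaped like real Jukebox checkpoint entries, not
# values read from an actual checkpoint:
#   replace_key("foo.prime_state_ln.weight")
#       -> "foo.encoder.final_layer_norm.weight"
#   replace_key("vqvae.bottleneck.level_blocks.0.k")
#       -> "vqvae.bottleneck.level_blocks.0.codebook"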
def fix_jukebox_keys(state_dict, model_state_dict, key_prefix, mapping):
    new_dict = {}
    import re
    re_encoder_block_conv_in = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_encoder_block_resnet = re.compile(
        r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_encoder_block_proj_out = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_conv_out = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
    re_decoder_block_resnet = re.compile(
        r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_decoder_block_proj_in = re.compile(r'decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_conv_out = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)')
    re_prior_cond_resnet = re.compile(
        r'conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)')
    re_prior_cond_proj_in = re.compile(r'conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)')
    for original_key, value in state_dict.items():
        # rename vqvae.encoder keys
        if re_encoder_block_conv_in.fullmatch(original_key):
            regex_match = re_encoder_block_conv_in.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
            key = re_encoder_block_conv_in.sub(re_new_key, original_key)
        elif re_encoder_block_resnet.fullmatch(original_key):
            regex_match = re_encoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3])
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_encoder_block_resnet.sub(re_new_key, original_key)
        elif re_encoder_block_proj_out.fullmatch(original_key):
            regex_match = re_encoder_block_proj_out.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}"
            key = re_encoder_block_proj_out.sub(re_new_key, original_key)
        # rename vqvae.decoder keys
        elif re_decoder_block_conv_out.fullmatch(original_key):
            regex_match = re_decoder_block_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}"
            key = re_decoder_block_conv_out.sub(re_new_key, original_key)
        elif re_decoder_block_resnet.fullmatch(original_key):
            regex_match = re_decoder_block_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[2]) * 2 + int(groups[3]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_decoder_block_resnet.sub(re_new_key, original_key)
        elif re_decoder_block_proj_in.fullmatch(original_key):
            regex_match = re_decoder_block_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}"
            key = re_decoder_block_proj_in.sub(re_new_key, original_key)
        # rename prior cond.model to upsampler.upsample_block and resnet
        elif re_prior_cond_conv_out.fullmatch(original_key):
            regex_match = re_prior_cond_conv_out.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            re_new_key = f"conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}"
            key = re_prior_cond_conv_out.sub(re_new_key, original_key)
        elif re_prior_cond_resnet.fullmatch(original_key):
            regex_match = re_prior_cond_resnet.match(original_key)
            groups = regex_match.groups()
            block_index = int(groups[1]) * 2 + int(groups[2]) - 2
            conv_index = {'1': 1, '3': 2}[groups[-2]]
            prefix = f"conditioner_blocks.upsampler.upsample_block.{block_index}."
            resnet_block = f"resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}"
            re_new_key = prefix + resnet_block
            key = re_prior_cond_resnet.sub(re_new_key, original_key)
        elif re_prior_cond_proj_in.fullmatch(original_key):
            regex_match = re_prior_cond_proj_in.match(original_key)
            groups = regex_match.groups()
            re_new_key = f"conditioner_blocks.upsampler.proj_in.{groups[-1]}"
            key = re_prior_cond_proj_in.sub(re_new_key, original_key)
        # keep original key
        else:
            key = original_key
        key = replace_key(key)
        if f"{key_prefix}.{key}" not in model_state_dict or key is None:
            print(f"failed converting {original_key} to {key}, does not match")
        # handle mismatched shape
        elif value.shape != model_state_dict[f"{key_prefix}.{key}"].shape:
            val = model_state_dict[f"{key_prefix}.{key}"]
            print(f"{original_key}-> {key} : \nshape {val.shape} and { value.shape}, do not match")
            key = original_key
        mapping[key] = original_key
        new_dict[key] = value
    return new_dict
@torch.no_grad()
def convert_openai_checkpoint(model_name=None, pytorch_dump_folder_path=None):
    for file in MODEL_MAPPING[model_name]:
        if not os.path.isfile(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}"):
            r = requests.get(f"{PREFIX}{file}", allow_redirects=True)
            os.makedirs(f"{pytorch_dump_folder_path}/", exist_ok=True)
            open(f"{pytorch_dump_folder_path}/{file.split('/')[-1]}", 'wb').write(r.content)
    model_to_convert = MODEL_MAPPING[model_name.split('/')[-1]]
    config = JukeboxConfig.from_pretrained(model_name)
    model = JukeboxModel(config)
    weight_dict = []
    mapping = {}
    for i, dict_name in enumerate(model_to_convert):
        old_dic = torch.load(f"{pytorch_dump_folder_path}/{dict_name.split('/')[-1]}")['model']
        new_dic = {}
        for k in old_dic.keys():
            if k.endswith('.b'):
                new_dic[k.replace('b', 'bias')] = old_dic[k]
            elif k.endswith('.w'):
                new_dic[k.replace('w', 'weight')] = old_dic[k]
            elif "level_2" not in dict_name and "cond.model." in k:
                new_dic[k.replace('.blocks.', '.model.')] = old_dic[k]
            else:
                new_dic[k] = old_dic[k]
        key_prefix = 'vqvae' if i == 0 else f"priors.{3 - i}"
        new_dic = fix_jukebox_keys(new_dic, model.state_dict(), key_prefix, mapping)
        weight_dict.append(new_dic)
    vqvae_state_dict = weight_dict.pop(0)
    model.vqvae.load_state_dict(vqvae_state_dict)
    for i in range(len(weight_dict)):
        model.priors[i].load_state_dict(weight_dict[2 - i])
    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    with open(f"{pytorch_dump_folder_path}/mapping.json", 'w') as txtfile:
        json.dump(mapping, txtfile)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    return weight_dict
if __name__ == "__main__":
lowerCAmelCase__ : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''jukebox-5b-lyrics''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''jukebox-5b-lyrics-converted''',
type=str,
help='''Path to the output PyTorch model directory.''',
)
lowerCAmelCase__ : List[str] = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
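# Illustrative walk-through (not part of the conversion script) of how one raw
# Jukebox key is renamed by the regexes above; the sample key is hypothetical:
#
#     import re
#     pattern = re.compile(r'encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)')
#     groups = pattern.match('encoders.0.level_blocks.1.model.2.3.weight').groups()
#     block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 3 == 7
#     # the key becomes 'encoders.0.level_blocks.1.downsample_block.7.weight'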
| 143 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if the candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
            if perfect_partitions > 0:
                if perfect_partitions / total_partitions < max_proportion:
                    return int(partition_candidate)
        integer += 1
if __name__ == "__main__":
print(f"""{solution() = }""")
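# Quick sanity checks (illustrative): sqrt(4*2 + 1)/2 + 1/2 == 2 and
# sqrt(4*12 + 1)/2 + 1/2 == 4 are exact powers of two, while n = 3 is not:
#
#     assert check_partition_perfect(2)
#     assert check_partition_perfect(12)
#     assert not check_partition_perfect(3)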
| 89 | 0 |
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class OfflineTests(TestCasePlus):
"""simple docstring"""
@require_torch
    def test_offline_mode(self):
        """simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_no_internet(self):
        """simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
        # Force fetching the files so that we can use the cache
        mname = 'hf-internal-testing/tiny-random-bert'
        BertConfig.from_pretrained(mname)
        BertModel.from_pretrained(mname)
        BertTokenizer.from_pretrained(mname)
        pipeline(task='fill-mask', model=mname)
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run, mock])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_sharded_checkpoint(self):
        """simple docstring"""
        load = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # next emulate no network
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        # Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
        # env["TRANSFORMERS_OFFLINE"] = "0"
        # result = subprocess.run(cmd, env=env, check=False, capture_output=True)
        # self.assertEqual(result.returncode, 1, result.stderr)
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
@require_torch
    def test_offline_mode_pipeline_exception(self):
        """simple docstring"""
        load = '\nfrom transformers import pipeline\n '
        run = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
        mock = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
        env = self.get_env()
        env['TRANSFORMERS_OFFLINE'] = '1'
        cmd = [sys.executable, '-c', '\n'.join([load, mock, run])]
        result = subprocess.run(cmd, env=env, check=False, capture_output=True)
self.assertEqual(result.returncode , 1 , result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' , result.stderr.decode().replace('\n' , '' ) , )
@require_torch
    def test_offline_mode_custom_model(self):
        """simple docstring"""
        load = '\nfrom transformers import AutoModel\n '
        run = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
        # baseline - just load from_pretrained with normal network
        cmd = [sys.executable, '-c', '\n'.join([load, run])]
        # should succeed
        env = self.get_env()
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
        self.assertEqual(result.returncode, 0, result.stderr)
        self.assertIn('success', result.stdout.decode())
        # should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
        env['TRANSFORMERS_OFFLINE'] = '1'
        result = subprocess.run(cmd, env=env, check=True, capture_output=True)
self.assertEqual(result.returncode , 0 , result.stderr )
self.assertIn('success' , result.stdout.decode() )
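# A minimal sketch of the pattern these tests exercise (illustrative): run a
# child interpreter with TRANSFORMERS_OFFLINE=1 so cached files are used
# instead of the network:
#
#     import os, subprocess, sys
#     env = {**os.environ, 'TRANSFORMERS_OFFLINE': '1'}
#     subprocess.run([sys.executable, '-c', 'import transformers'], env=env, check=True)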
| 39 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def shave_segments(path, n_shave_prefix_segments=1):
    if n_shave_prefix_segments >= 0:
        return '.'.join(path.split('.')[n_shave_prefix_segments:])
    else:
        return '.'.join(path.split('.')[:n_shave_prefix_segments])
def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace('in_layers.0', 'norm1')
        new_item = new_item.replace('in_layers.2', 'conv1')
        new_item = new_item.replace('out_layers.0', 'norm2')
        new_item = new_item.replace('out_layers.3', 'conv2')
        new_item = new_item.replace('emb_layers.1', 'time_emb_proj')
        new_item = new_item.replace('skip_connection', 'conv_shortcut')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    mapping = []
    for old_item in old_list:
        new_item = old_item
        new_item = new_item.replace('norm.weight', 'group_norm.weight')
        new_item = new_item.replace('norm.bias', 'group_norm.bias')
        new_item = new_item.replace('proj_out.weight', 'proj_attn.weight')
        new_item = new_item.replace('proj_out.bias', 'proj_attn.bias')
        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)
        mapping.append({'old': old_item, 'new': new_item})
    return mapping
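# Illustrative example of the mapping these helpers produce (the sample key is
# hypothetical):
#
#     renew_resnet_paths(['input_blocks.1.0.in_layers.0.weight'])
#     # -> [{'old': 'input_blocks.1.0.in_layers.0.weight',
#     #      'new': 'input_blocks.1.0.norm1.weight'}]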
def assign_to_checkpoint(paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None):
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."
    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3
            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)
            num_heads = old_tensor.shape[0] // config['num_head_channels'] // 3
            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)
            checkpoint[path_map['query']] = query.reshape(target_shape)
            checkpoint[path_map['key']] = key.reshape(target_shape)
            checkpoint[path_map['value']] = value.reshape(target_shape)
    for path in paths:
        new_path = path['new']
        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue
        # Global renaming happens here
        new_path = new_path.replace('middle_block.0', 'mid_block.resnets.0')
        new_path = new_path.replace('middle_block.1', 'mid_block.attentions.0')
        new_path = new_path.replace('middle_block.2', 'mid_block.resnets.1')
        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement['old'], replacement['new'])
        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path['old']][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path['old']]
def convert_ldm_checkpoint(checkpoint, config):
    new_checkpoint = {}
    new_checkpoint['time_embedding.linear_1.weight'] = checkpoint['time_embed.0.weight']
    new_checkpoint['time_embedding.linear_1.bias'] = checkpoint['time_embed.0.bias']
    new_checkpoint['time_embedding.linear_2.weight'] = checkpoint['time_embed.2.weight']
    new_checkpoint['time_embedding.linear_2.bias'] = checkpoint['time_embed.2.bias']
    new_checkpoint['conv_in.weight'] = checkpoint['input_blocks.0.0.weight']
    new_checkpoint['conv_in.bias'] = checkpoint['input_blocks.0.0.bias']
    new_checkpoint['conv_norm_out.weight'] = checkpoint['out.0.weight']
    new_checkpoint['conv_norm_out.bias'] = checkpoint['out.0.bias']
    new_checkpoint['conv_out.weight'] = checkpoint['out.2.weight']
    new_checkpoint['conv_out.bias'] = checkpoint['out.2.bias']
    # Retrieves the keys for the input blocks only
    num_input_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'input_blocks' in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }
    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'middle_block' in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }
    # Retrieves the keys for the output blocks only
    num_output_blocks = len({'.'.join(layer.split('.')[:2]) for layer in checkpoint if 'output_blocks' in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
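# Example invocation (script name and paths are placeholders):
#
#     python convert_ldm_checkpoint.py \
#         --checkpoint_path ./model.ckpt \
#         --config_file ./config.json \
#         --dump_path ./converted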
| 89 | 0 |
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
    class Image:
        '''simple docstring'''
        @staticmethod
        def open(*args, **kwargs):
            """simple docstring"""
            pass
def hashimage(image: Image) -> str:
    """simple docstring"""
    m = hashlib.md5(image.tobytes())
    return m.hexdigest()[:10]


def mask_to_test_readable(mask: Image) -> Dict:
    """simple docstring"""
    npimg = np.array(mask)
    shape = npimg.shape
    return {"hash": hashimage(mask), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class MaskGenerationPipelineTests(unittest.TestCase):
'''simple docstring'''
    model_mapping = dict(
        (list(MODEL_FOR_MASK_GENERATION_MAPPING.items()) if MODEL_FOR_MASK_GENERATION_MAPPING else []))
    tf_model_mapping = dict(
        (list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items()) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []))
    def get_test_pipeline(self, model, tokenizer, processor):
        """simple docstring"""
        image_segmenter = MaskGenerationPipeline(model=model, image_processor=processor)
        return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
    def run_pipeline_test(self, mask_generator, examples):
        """simple docstring"""
        pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
    def test_small_model_tf(self):
"""simple docstring"""
pass
@slow
@require_torch
    def test_small_model_pt(self):
        """simple docstring"""
        image_segmenter = pipeline("""mask-generation""", model="""facebook/sam-vit-huge""")
        outputs = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""", points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        # fmt: off
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
    def test_threshold(self):
        """simple docstring"""
        model_id = 'facebook/sam-vit-huge'
        image_segmenter = pipeline("""mask-generation""", model=model_id)
        outputs = image_segmenter(
            """http://images.cocodataset.org/val2017/000000039769.jpg""", pred_iou_thresh=1, points_per_batch=256)
        # Shortening by hashing
        new_output = []
        for i, o in enumerate(outputs["""masks"""]):
            new_output += [{"mask": mask_to_test_readable(o), "scores": outputs["scores"][i]}]
        self.assertEqual(
            nested_simplify(new_output, decimals=4), [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_0_5_3},
] , )
| 274 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    ar = f"{sampling_rate}"
    ac = '1'
    format_for_conversion = 'f32le'
    ffmpeg_command = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to load audio files from filename') from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError('Malformed soundfile')
    return audio
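# Illustrative usage (assumes ffmpeg is on PATH and 'sample.wav' exists):
#
#     with open('sample.wav', 'rb') as f:
#         audio = ffmpeg_read(f.read(), sampling_rate=16000)
#     audio.dtype, audio.shape  # (dtype('float32'), (num_samples,))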
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
    ffmpeg_command = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(sampling_rate: int, chunk_length_s: float, stream_chunk_s: Optional[int] = None, stride_length_s: Optional[Union[Tuple[float, float], float]] = None, format_for_conversion: str = 'f32le', ):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")
    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
# Put everything back in numpy scale
        item['raw'] = np.frombuffer(item['raw'], dtype=dtype)
        item['stride'] = (
            item['stride'][0] // size_of_sample,
            item['stride'][1] // size_of_sample,
        )
        item['sampling_rate'] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}")
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {'raw': acc[:chunk_len], 'stride': stride}
                if stream:
                    item['partial'] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right:]
    # Last chunk
    if len(acc) > stride_left:
        item = {'raw': acc, 'stride': (_stride_left, 0)}
        if stream:
            item['partial'] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError('ffmpeg was not found but is required to stream audio files from filename') from error
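# Sketch of the chunking semantics of chunk_bytes_iter (values illustrative):
# with chunk_len=4 and stride=(0, 1), consecutive chunks overlap by one byte on
# the right, and the remainder is flushed at the end:
#
#     list(chunk_bytes_iter(iter([b'abcdefgh']), 4, stride=(0, 1)))
#     # -> [{'raw': b'abcd', 'stride': (0, 1)},
#     #     {'raw': b'defg', 'stride': (0, 1)},
#     #     {'raw': b'gh', 'stride': (0, 0)}]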
| 89 | 0 |
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTMAEModelTester:
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02, num_labels=3, mask_ratio=0.6, scope=None, ):
        """simple docstring"""
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.mask_ratio = mask_ratio
        self.scope = scope
        # in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
        # (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = int(math.ceil((1 - mask_ratio) * (num_patches + 1)))
    def prepare_config_and_inputs(self):
        """simple docstring"""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels
    def get_config(self):
"""simple docstring"""
return ViTMAEConfig(
image_size=self.image_size,patch_size=self.patch_size,num_channels=self.num_channels,hidden_size=self.hidden_size,num_hidden_layers=self.num_hidden_layers,num_attention_heads=self.num_attention_heads,intermediate_size=self.intermediate_size,hidden_act=self.hidden_act,hidden_dropout_prob=self.hidden_dropout_prob,attention_probs_dropout_prob=self.attention_probs_dropout_prob,is_decoder=_UpperCAmelCase,initializer_range=self.initializer_range,mask_ratio=self.mask_ratio,)
    def create_and_check_model(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTMAEModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_pretraining(self, config, pixel_values, labels):
        """simple docstring"""
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        num_patches = (self.image_size // self.patch_size) ** 2
        expected_num_channels = self.patch_size**2 * self.num_channels
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
        # test greyscale images
        config.num_channels = 1
        model = ViTMAEForPreTraining(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        expected_num_channels = self.patch_size**2
        self.parent.assertEqual(result.logits.shape, (self.batch_size, num_patches, expected_num_channels))
    def prepare_config_and_inputs_for_common(self):
        """simple docstring"""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': ViTMAEModel} if is_torch_available() else {}
    test_pruning = False
    test_torchscript = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        """simple docstring"""
        self.model_tester = ViTMAEModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTMAEConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="ViTMAE does not use inputs_embeds" )
    def test_inputs_embeds(self):
"""simple docstring"""
pass
    def test_model_common_attributes(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    def test_for_pretraining(self):
        """simple docstring"""
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)
    def check_pt_tf_models(self, tf_model, pt_model, inputs_dict):
        """simple docstring"""
        np.random.seed(2)
        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
        pt_noise = torch.from_numpy(noise)
        # Add `noise` argument.
        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
        inputs_dict['noise'] = pt_noise
        super().check_pt_tf_models(tf_model, pt_model, inputs_dict)
    def test_save_load(self):
        """simple docstring"""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            # make random mask reproducible
            torch.manual_seed(2)
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            out_2 = outputs[0].cpu().numpy()
            out_2[np.isnan(out_2)] = 0
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model = model_class.from_pretrained(tmpdirname)
                model.to(torch_device)
                # make random mask reproducible
                torch.manual_seed(2)
                with torch.no_grad():
                    after_outputs = model(**self._prepare_for_class(inputs_dict, model_class))
                # Make sure we don't have nans
                out_1 = after_outputs[0].cpu().numpy()
                out_1[np.isnan(out_1)] = 0
                max_diff = np.amax(np.abs(out_1 - out_2))
                self.assertLessEqual(max_diff, 1e-5)
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : str ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Dict ):
"""simple docstring"""
pass
@unittest.skip(
reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load\n to get deterministic results." )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@unittest.skip(reason="ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load" )
def __UpperCamelCase ( self : Union[str, Any] ):
"""simple docstring"""
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def __UpperCamelCase ( self : Optional[Any] ):
"""simple docstring"""
pass
@slow
    def test_model_from_pretrained(self):
        """simple docstring"""
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTMAEModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """simple docstring"""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTMAEModelIntegrationTest(unittest.TestCase):
@cached_property
    def default_image_processor(self):
"""simple docstring"""
return ViTImageProcessor.from_pretrained("facebook/vit-mae-base" ) if is_vision_available() else None
@slow
    def test_inference_for_pretraining(self):
        """simple docstring"""
        np.random.seed(2)
        model = ViTMAEForPreTraining.from_pretrained("facebook/vit-mae-base").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
        # prepare a noise vector that will be also used for testing the TF model
        # (this way we can ensure that the PT and TF models operate on the same inputs)
        vit_mae_config = ViTMAEConfig()
        num_patches = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2)
        noise = np.random.uniform(size=(1, num_patches))
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs, noise=torch.from_numpy(noise).to(device=torch_device))
        # verify the logits
        expected_shape = torch.Size((1, 196, 768))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor(
            [[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]])
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3], expected_slice.to(torch_device), atol=1e-4))
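# Why a fixed `noise` tensor appears throughout these tests (illustrative
# sketch): ViTMAE samples a random patch mask on every forward pass, so two
# calls only produce identical logits when the noise is pinned:
#
#     noise = torch.rand(1, num_patches)
#     out_a = model(pixel_values, noise=noise)
#     out_b = model(pixel_values, noise=noise)  # same masking, same logits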
| 18 |
'''simple docstring'''
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
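# A further example on the same graph (illustrative):
#
#     bfs_shortest_path(demo_graph, 'G', 'F')           # ['G', 'C', 'F']
#     bfs_shortest_path_distance(demo_graph, 'G', 'F')  # 2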
| 89 | 0 |
import re
def indian_phone_validator(phone: str) -> bool:
    pat = re.compile(R'''^(\+91[\-\s]?)?[0]?(91)?[789]\d{9}$''')
    if match := re.search(pat, phone):
        return match.string == phone
    return False
if __name__ == "__main__":
print(indian_phone_validator('''+918827897895'''))
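# More illustrative cases:
#
#     indian_phone_validator('9876543210')       # True
#     indian_phone_validator('+91 9876543210')   # True
#     indian_phone_validator('123456')           # False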
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
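# Consumer-side sketch (illustrative): with the lazy module installed, the
# import below only resolves SwinConfig on first attribute access, keeping
# module import cheap:
#
#     from transformers.models.swin import SwinConfig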
| 89 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case : Optional[Any] = logging.get_logger(__name__)
__snake_case : List[Any] = {
'microsoft/cvt-13': 'https://huggingface.co/microsoft/cvt-13/resolve/main/config.json',
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class CvtConfig(PretrainedConfig):
    '''simple docstring'''
    model_type = 'cvt'
    def __init__(self, num_channels=3, patch_sizes=[7, 3, 3], patch_stride=[4, 2, 2], patch_padding=[2, 1, 1], embed_dim=[64, 192, 384], num_heads=[1, 3, 6], depth=[1, 2, 10], mlp_ratio=[4.0, 4.0, 4.0], attention_drop_rate=[0.0, 0.0, 0.0], drop_rate=[0.0, 0.0, 0.0], drop_path_rate=[0.0, 0.0, 0.1], qkv_bias=[True, True, True], cls_token=[False, False, True], qkv_projection_method=["dw_bn", "dw_bn", "dw_bn"], kernel_qkv=[3, 3, 3], padding_kv=[1, 1, 1], stride_kv=[2, 2, 2], padding_q=[1, 1, 1], stride_q=[1, 1, 1], initializer_range=0.02, layer_norm_eps=1e-12, **kwargs, ):
        """simple docstring"""
        super().__init__(**kwargs)
        self.num_channels = num_channels
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.depth = depth
        self.mlp_ratio = mlp_ratio
        self.attention_drop_rate = attention_drop_rate
        self.drop_rate = drop_rate
        self.drop_path_rate = drop_path_rate
        self.qkv_bias = qkv_bias
        self.cls_token = cls_token
        self.qkv_projection_method = qkv_projection_method
        self.kernel_qkv = kernel_qkv
        self.padding_kv = padding_kv
        self.stride_kv = stride_kv
        self.padding_q = padding_q
        self.stride_q = stride_q
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
| 269 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], '<s>')
        self.assertEqual(vocab_keys[1], '<pad>')
        self.assertEqual(vocab_keys[-1], '<mask>')
        self.assertEqual(len(vocab_keys), 101122)
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring shared by ``text1`` and ``text2``."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")

    text1_length = len(text1)
    text2_length = len(text2)

    # dp[i][j] holds the length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0

    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]

    return text1[ans_index - ans_length : ans_index]
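

# Example (names per the cleaned-up signature above):
#     longest_common_substring("programming", "gaming") == "ming"
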
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
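
    # All of the tests above share one mechanism: compose a tiny script from the
    # `load` / `run` / `mock` one-liner snippets, launch it with subprocess.run()
    # and a copy of the environment (optionally with TRANSFORMERS_OFFLINE=1), then
    # assert on the child's return code and captured stdout/stderr.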
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __snake_case ( _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase_ = StableDiffusionInstructPixaPixPipeline
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {'height', 'width', 'cross_attention_kwargs'}
lowerCAmelCase_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def __a ( self : List[str] ):
"""simple docstring"""
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=8 , out_channels=4 , down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""") , up_block_types=("""CrossAttnUpBlock2D""", """UpBlock2D""") , cross_attention_dim=32 , )
SCREAMING_SNAKE_CASE__ = PNDMScheduler(skip_prk_steps=_UpperCAmelCase )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , )
torch.manual_seed(0 )
SCREAMING_SNAKE_CASE__ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
SCREAMING_SNAKE_CASE__ = CLIPTextModel(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
SCREAMING_SNAKE_CASE__ = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __a ( self : Optional[Any] , _lowercase : Union[str, Any] , _lowercase : str=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = floats_tensor((1, 3, 32, 32) , rng=random.Random(_UpperCAmelCase ) ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = image.cpu().permute(0 , 2 , 3 , 1 )[0]
SCREAMING_SNAKE_CASE__ = Image.fromarray(np.uinta(_UpperCAmelCase ) ).convert("""RGB""" )
if str(_UpperCAmelCase ).startswith("""mps""" ):
SCREAMING_SNAKE_CASE__ = torch.manual_seed(_UpperCAmelCase )
else:
SCREAMING_SNAKE_CASE__ = torch.Generator(device=_UpperCAmelCase ).manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'image_guidance_scale': 1,
'output_type': 'numpy',
}
return inputs
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.75_26, 0.37_50, 0.45_47, 0.61_17, 0.58_66, 0.50_16, 0.43_27, 0.56_42, 0.48_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
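
        # Standard diffusers slice test: a 3x3 corner of the output image is
        # compared against hard-coded reference values with a small absolute
        # tolerance, keeping the assertion robust to tiny numeric drift.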
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = 'french fries'
SCREAMING_SNAKE_CASE__ = sd_pipe(**_UpperCAmelCase , negative_prompt=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = output.images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.75_11, 0.36_42, 0.45_53, 0.62_36, 0.57_97, 0.50_13, 0.43_43, 0.56_11, 0.48_31] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = [inputs['prompt']] * 2
SCREAMING_SNAKE_CASE__ = np.array(inputs["""image"""] ).astype(np.floataa ) / 2_55.0
SCREAMING_SNAKE_CASE__ = torch.from_numpy(_UpperCAmelCase ).unsqueeze(0 ).to(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = image / 2 + 0.5
SCREAMING_SNAKE_CASE__ = image.permute(0 , 3 , 1 , 2 )
SCREAMING_SNAKE_CASE__ = image.repeat(2 , 1 , 1 , 1 )
SCREAMING_SNAKE_CASE__ = sd_pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[-1, -3:, -3:, -1]
assert image.shape == (2, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.58_12, 0.57_48, 0.52_22, 0.59_08, 0.56_95, 0.71_74, 0.68_04, 0.55_23, 0.55_79] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 'cpu' # ensure determinism for the device-dependent torch.Generator
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = EulerAncestralDiscreteScheduler(
beta_start=0.0_00_85 , beta_end=0.0_12 , beta_schedule="""scaled_linear""" )
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe.to(_UpperCAmelCase )
sd_pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = sd_pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = [round(_UpperCAmelCase , 4 ) for x in image_slice.flatten().tolist()]
print(""",""".join([str(_UpperCAmelCase ) for x in slice] ) )
assert image.shape == (1, 32, 32, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.74_17, 0.38_42, 0.47_32, 0.57_76, 0.58_91, 0.51_39, 0.40_52, 0.56_73, 0.49_86] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def __a ( self : Optional[int] ):
"""simple docstring"""
super().test_inference_batch_single_identical(expected_max_diff=3E-3 )
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_dummy_components()
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = VaeImageProcessor(do_resize=_UpperCAmelCase , do_normalize=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pipe(**self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type="""pt""" ) )[0]
SCREAMING_SNAKE_CASE__ = components['vae']
SCREAMING_SNAKE_CASE__ = self.get_dummy_inputs_by_type(_UpperCAmelCase , input_image_type="""pt""" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
SCREAMING_SNAKE_CASE__ = vae.encode(inputs[image_param] ).latent_dist.mode()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase )[0]
SCREAMING_SNAKE_CASE__ = np.abs(out - out_latents_inputs ).max()
self.assertLess(_UpperCAmelCase , 1E-4 , """passing latents as image input generate different result from passing image""" )
@slow
@require_torch_gpu
class __snake_case ( unittest.TestCase ):
def __a ( self : List[str] ):
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __a ( self : int , _lowercase : Dict=0 ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = torch.manual_seed(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = load_image(
"""https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg""" )
SCREAMING_SNAKE_CASE__ = {
'prompt': 'turn him into a cyborg',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'image_guidance_scale': 1.0,
'output_type': 'numpy',
}
return inputs
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_UpperCAmelCase )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.59_02, 0.60_15, 0.60_27, 0.59_83, 0.60_92, 0.60_61, 0.57_65, 0.57_85, 0.55_55] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.65_78, 0.68_17, 0.69_72, 0.67_61, 0.68_56, 0.69_16, 0.64_28, 0.65_16, 0.63_01] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase ).images
SCREAMING_SNAKE_CASE__ = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_12, 5_12, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.38_28, 0.38_34, 0.38_18, 0.37_92, 0.38_65, 0.37_52, 0.37_92, 0.38_47, 0.37_53] )
assert np.abs(expected_slice - image_slice ).max() < 1E-3
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = 0
def callback_fn(_lowercase : int , _lowercase : int , _lowercase : torch.FloatTensor ) -> None:
SCREAMING_SNAKE_CASE__ = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array([-0.24_63, -0.46_44, -0.97_56, 1.51_76, 1.44_14, 0.78_66, 0.98_97, 0.85_21, 0.79_83] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
elif step == 2:
SCREAMING_SNAKE_CASE__ = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 64, 64)
SCREAMING_SNAKE_CASE__ = latents[0, -3:, -3:, -1]
SCREAMING_SNAKE_CASE__ = np.array([-0.26_44, -0.46_26, -0.96_53, 1.51_76, 1.45_51, 0.76_86, 0.98_05, 0.84_52, 0.81_15] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5E-2
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
pipe(**_UpperCAmelCase , callback=_UpperCAmelCase , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def __a ( self : List[str] ):
"""simple docstring"""
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"""timbrooks/instruct-pix2pix""" , safety_checker=_UpperCAmelCase , torch_dtype=torch.floataa )
SCREAMING_SNAKE_CASE__ = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
SCREAMING_SNAKE_CASE__ = self.get_inputs()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 10**9
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
SCREAMING_SNAKE_CASE__ = inputs['image'].resize((5_04, 5_04) )
SCREAMING_SNAKE_CASE__ = 'timbrooks/instruct-pix2pix'
SCREAMING_SNAKE_CASE__ = StableDiffusionInstructPixaPixPipeline.from_pretrained(
_UpperCAmelCase , safety_checker=_UpperCAmelCase , )
pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
pipe.enable_attention_slicing()
SCREAMING_SNAKE_CASE__ = pipe(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = output.images[0]
SCREAMING_SNAKE_CASE__ = image[2_55:2_58, 3_83:3_86, -1]
assert image.shape == (5_04, 5_04, 3)
SCREAMING_SNAKE_CASE__ = np.array([0.27_26, 0.25_29, 0.26_64, 0.26_55, 0.26_41, 0.26_42, 0.25_91, 0.26_49, 0.25_90] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
"""Project Euler 12: find the first triangle number with more than 500 divisors."""


def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n: int) -> int:
    # multiply (multiplicity + 1) across the prime factorisation of n
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count
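

# Worked check: 28 = 2**2 * 7, so count_divisors(28) == (2 + 1) * (1 + 1) == 6
# (divisors 1, 2, 4, 7, 14, 28); 28 is also the first triangle number with
# more than five divisors, the warm-up example in the problem statement.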
def solution() -> int:
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
"""simple docstring"""
import math
def lowercase__ ( _UpperCAmelCase ) -> bool:
'''simple docstring'''
lowercase : Optional[int] = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(lowerCAmelCase_ )
def lowercase__ ( _UpperCAmelCase = 1 / 1_23_45 ) -> int:
'''simple docstring'''
lowercase : int = 0
lowercase : Optional[Any] = 0
lowercase : int = 3
while True:
lowercase : Tuple = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(lowerCAmelCase_ ):
lowercase : Union[str, Any] = int(lowerCAmelCase_ )
total_partitions += 1
if check_partition_perfect(lowerCAmelCase_ ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(lowerCAmelCase_ )
integer += 1
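

# Quick sanity check of the helper above: for the value 2, sqrt(4 * 2 + 1) = 3
# and log2(3 / 2 + 1 / 2) = log2(2) = 1 is an integer, so it counts as perfect.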
if __name__ == "__main__":
print(f'''{solution() = }''')
"""Read datasets from and write datasets to SQL databases."""
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(cache_dir=cache_dir, features=features, sql=sql, con=con, **kwargs)

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None

        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )

        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"num_proc {num_proc} must be an integer > 0.")

        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs
    def write(self) -> int:
        _ = self.to_sql_kwargs.pop("sql", None)
        _ = self.to_sql_kwargs.pop("con", None)
        index = self.to_sql_kwargs.pop("index", False)

        written = self._write(index=index, **self.to_sql_kwargs)
        return written
    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, "if_exists": "append"} if offset > 0 else to_sql_kwargs

        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)
    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0

        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit="ba",
                disable=not logging.is_progress_bar_enabled(),
                desc="Creating SQL from Arrow format",
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows_written in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit="ba",
                    disable=not logging.is_progress_bar_enabled(),
                    desc="Creating SQL from Arrow format",
                ):
                    written += num_rows_written

        return written
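

# Hypothetical round-trip sketch using the classes above (connection string
# and table names are placeholders, not part of the original module):
#   ds = SqlDatasetReader("SELECT * FROM items", "sqlite:///example.db").read()
#   SqlDatasetWriter(ds, "items_copy", "sqlite:///example.db", num_proc=1).write()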
"""Rotate a grayscale image using OpenCV affine transforms."""
from pathlib import Path

import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    """Warp `img` with the affine transform that maps the points `pt1` onto `pt2`."""
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
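

# Quick illustration of what cv2.getAffineTransform computes: mapping three
# points by a pure (1, 2) translation yields the matrix [[1, 0, 1], [0, 1, 2]].
#   m = cv2.getAffineTransform(
#       np.array([[0, 0], [1, 0], [0, 1]], np.float32),
#       np.array([[1, 2], [2, 2], [1, 3]], np.float32),
#   )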
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image into gray scale values
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # get image shape
    img_rows, img_cols = gray_img.shape

    # set different points to rotate the image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)

    # add all rotated images to a list
    # (the source lost which point sets were paired; the pairing below is assumed)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]

    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
"""Vigenère cipher: encrypt and decrypt alphabetic messages with a keyword."""
LETTERS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"


def main() -> None:
    message = input("Enter message: ")
    key = input("Enter key [alphanumeric]: ")
    mode = input("Encrypt/Decrypt [e/d]: ")

    if mode.lower().startswith("e"):
        mode = "encrypt"
        translated = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        mode = "decrypt"
        translated = decrypt_message(key, message)

    print(f"\n{mode.title()}ed message:")
    print(translated)


def encrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "encrypt")


def decrypt_message(key: str, message: str) -> str:
    return translate_message(key, message, "decrypt")
def translate_message(key: str, message: str, mode: str) -> str:
    translated = []
    key_index = 0
    key = key.upper()

    for symbol in message:
        num = LETTERS.find(symbol.upper())
        if num != -1:
            if mode == "encrypt":
                num += LETTERS.find(key[key_index])
            elif mode == "decrypt":
                num -= LETTERS.find(key[key_index])

            num %= len(LETTERS)

            if symbol.isupper():
                translated.append(LETTERS[num])
            elif symbol.islower():
                translated.append(LETTERS[num].lower())

            key_index += 1
            if key_index == len(key):
                key_index = 0
        else:
            translated.append(symbol)

    return "".join(translated)
if __name__ == "__main__":
    main()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_bigbird_pegasus"] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
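
# Note on the pattern above: importing this package stays cheap because
# `_LazyModule` defers the heavy submodule imports; a name listed in
# `_import_structure` (e.g. `BigBirdPegasusModel`) is only imported the first
# time it is accessed as an attribute.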
import argparse
import os
import re

import torch
from flax.traverse_util import flatten_dict
from t5x import checkpoints

from transformers import (
    AutoTokenizer,
    Pix2StructConfig,
    Pix2StructForConditionalGeneration,
    Pix2StructImageProcessor,
    Pix2StructProcessor,
    Pix2StructTextConfig,
    Pix2StructVisionConfig,
)


def get_flax_param(t5x_checkpoint_path):
    flax_params = checkpoints.load_t5x_checkpoint(t5x_checkpoint_path)
    flax_params = flatten_dict(flax_params)
    return flax_params
def rename_and_convert_flax_params(flax_dict):
    converted_dict = {}

    CONVERSION_MAPPING = {
        "token_embedder": "embeddings",
        "encoder_norm": "layernorm",
        "kernel": "weight",
        ".out": ".output",
        "scale": "weight",
        "embedders_0.pos_embedding": "row_embedder.weight",
        "embedders_1.pos_embedding": "column_embedder.weight",
    }

    DECODER_CONVERSION_MAPPING = {
        "query": "attention.query",
        "key": "attention.key",
        "value": "attention.value",
        "output.dense": "output",
        "encoder_decoder_attention.o": "encoder_decoder_attention.attention.o",
        "pre_self_attention_layer_norm": "self_attention.layer_norm",
        "pre_cross_attention_layer_norm": "encoder_decoder_attention.layer_norm",
        "mlp.": "mlp.DenseReluDense.",
        "pre_mlp_layer_norm": "mlp.layer_norm",
        "self_attention.o": "self_attention.attention.o",
        "decoder.embeddings.embedding": "decoder.embed_tokens.weight",
        "decoder.relpos_bias.rel_embedding": "decoder.layer.0.self_attention.attention.relative_attention_bias.weight",
        "decoder.decoder_norm.weight": "decoder.final_layer_norm.weight",
        "decoder.logits_dense.weight": "decoder.lm_head.weight",
    }

    for key in flax_dict.keys():
        if "target" in key:
            # remove the first prefix from the key
            new_key = ".".join(key[1:])

            # rename the key
            for old, new in CONVERSION_MAPPING.items():
                new_key = new_key.replace(old, new)

            if "decoder" in new_key:
                for old, new in DECODER_CONVERSION_MAPPING.items():
                    new_key = new_key.replace(old, new)

            if "layers" in new_key and "decoder" not in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)
                new_key = new_key.replace("encoder", "encoder.encoder")

            elif "layers" in new_key and "decoder" in new_key:
                # use regex to replace the layer number
                new_key = re.sub(r"layers_(\d+)", r"layer.\1", new_key)

            converted_dict[new_key] = flax_dict[key]

    converted_torch_dict = {}
    # convert converted_dict into torch format
    for key in converted_dict.keys():
        if ("embed_tokens" not in key) and ("embedder" not in key):
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key].T)
        else:
            converted_torch_dict[key] = torch.from_numpy(converted_dict[key])

    return converted_torch_dict
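

# Illustration of the layer-renaming regex used above:
#   re.sub(r"layers_(\d+)", r"layer.\1", "encoder.layers_0.attention.query")
#   -> "encoder.layer.0.attention.query"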
def convert_pix2struct_original_pytorch_checkpoint_to_hf(
    t5x_checkpoint_path, pytorch_dump_folder_path, use_large=False, is_vqa=False
):
    flax_params = get_flax_param(t5x_checkpoint_path)

    if not use_large:
        encoder_config = Pix2StructVisionConfig()
        decoder_config = Pix2StructTextConfig()
    else:
        encoder_config = Pix2StructVisionConfig(
            hidden_size=1536, d_ff=3968, num_attention_heads=24, num_hidden_layers=18
        )
        decoder_config = Pix2StructTextConfig(hidden_size=1536, d_ff=3968, num_heads=24, num_layers=18)
    config = Pix2StructConfig(
        vision_config=encoder_config.to_dict(), text_config=decoder_config.to_dict(), is_vqa=is_vqa
    )

    model = Pix2StructForConditionalGeneration(config)

    torch_params = rename_and_convert_flax_params(flax_params)
    model.load_state_dict(torch_params)

    tokenizer = AutoTokenizer.from_pretrained("ybelkada/test-pix2struct-tokenizer")
    image_processor = Pix2StructImageProcessor()
    processor = Pix2StructProcessor(image_processor=image_processor, tokenizer=tokenizer)

    if use_large:
        # the two attribute targets below are reconstructed; the source only
        # preserved the assigned values 4096 and True
        processor.image_processor.max_patches = 4096
        processor.image_processor.is_vqa = True

    # mkdir if needed
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)

    model.save_pretrained(pytorch_dump_folder_path)
    processor.save_pretrained(pytorch_dump_folder_path)

    print("Model saved in {}".format(pytorch_dump_folder_path))
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--t5x_checkpoint_path", default=None, type=str, help="Path to the original T5x checkpoint.")
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--use_large", action="store_true", help="Use large model.")
    parser.add_argument("--is_vqa", action="store_true", help="Convert the VQA variant.")

    args = parser.parse_args()
    convert_pix2struct_original_pytorch_checkpoint_to_hf(
        args.t5x_checkpoint_path, args.pytorch_dump_folder_path, args.use_large
    )
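
# Example invocation (the script name and paths are placeholders):
#   python convert_pix2struct_original_pytorch_to_hf.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint \
#       --pytorch_dump_folder_path /path/to/output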
"""Compute and pickle per-example token lengths for a Seq2SeqDataset."""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm

from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tokenizer.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tokenizer, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
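

# Typical call (argument values are illustrative; `data_dir` must contain the
# train/val files that Seq2SeqDataset expects):
#   save_len_file("facebook/bart-large", "path/to/data_dir", max_source_length=1024)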
if __name__ == "__main__":
fire.Fire(save_len_file)
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise-constant multiplier from a rule string like "1:10,0.1:20,0.01:30,0.005"."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup followed by a linear decay to 0 over `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay to 0 over `num_training_steps`."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Cosine decay with `num_cycles` hard restarts, after a linear warmup."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Polynomial decay from the optimizer's initial lr down to `lr_end`, after warmup."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)
TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    """Build any of the schedules above from its `SchedulerType` name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
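

# Short usage sketch for `get_scheduler` (optimizer and step counts are
# illustrative):
#   import torch
#   opt = torch.optim.AdamW([torch.nn.Parameter(torch.zeros(1))], lr=1e-3)
#   sched = get_scheduler("linear", opt, num_warmup_steps=10, num_training_steps=100)
#   for _ in range(100):
#       opt.step()
#       sched.step()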
'''simple docstring'''
from typing import Any
class __magic_name__ :
def __init__( self : List[Any] ,_UpperCAmelCase : Any ):
_a : List[Any] = data
_a : Union[str, Any] = None
def __repr__( self : Any ):
return F"""Node({self.data})"""
class __magic_name__ :
def __init__( self : int ):
_a : Tuple = None
def __iter__( self : str ):
_a : int = self.head
while node:
yield node.data
_a : Union[str, Any] = node.next
def __len__( self : Optional[Any] ):
return sum(1 for _ in self )
def __repr__( self : str ):
return "->".join([str(_UpperCAmelCase ) for item in self] )
def __getitem__( self : Tuple ,_UpperCAmelCase : int ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
for i, node in enumerate(self ):
if i == index:
return node
return None
def __setitem__( self : Union[str, Any] ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index < len(self ):
raise ValueError('list index out of range.' )
_a : Any = self.head
for _ in range(_UpperCAmelCase ):
_a : Optional[Any] = current.next
_a : Optional[int] = data
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Any ):
self.insert_nth(len(self ) ,_UpperCAmelCase )
def __lowercase ( self : Union[str, Any] ,_UpperCAmelCase : Any ):
self.insert_nth(0 ,_UpperCAmelCase )
def __lowercase ( self : str ,_UpperCAmelCase : int ,_UpperCAmelCase : Any ):
if not 0 <= index <= len(self ):
raise IndexError('list index out of range' )
_a : int = Node(_UpperCAmelCase )
if self.head is None:
_a : str = new_node
elif index == 0:
_a : List[str] = self.head # link new_node to head
_a : Union[str, Any] = new_node
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : Union[str, Any] = temp.next
_a : List[str] = temp.next
_a : Optional[int] = new_node
def __lowercase ( self : Optional[int] ): # print every node data
print(self )
def __lowercase ( self : str ):
return self.delete_nth(0 )
def __lowercase ( self : str ): # delete from tail
return self.delete_nth(len(self ) - 1 )
def __lowercase ( self : List[str] ,_UpperCAmelCase : int = 0 ):
if not 0 <= index <= len(self ) - 1: # test if index is valid
raise IndexError('List index out of range.' )
_a : Optional[Any] = self.head # default first node
if index == 0:
_a : int = self.head.next
else:
_a : int = self.head
for _ in range(index - 1 ):
_a : str = temp.next
_a : str = temp.next
_a : int = temp.next.next
return delete_node.data
def __lowercase ( self : List[Any] ):
return self.head is None
def __lowercase ( self : Tuple ):
_a : List[Any] = None
_a : Tuple = self.head
while current:
# Store the current node's next node.
_a : Dict = current.next
# Make the current node's next point backwards
_a : str = prev
# Make the previous node be the current node
_a : Tuple = current
# Make the current node the next node (to progress iteration)
_a : Optional[Any] = next_node
# Return prev in order to put the head at the end
_a : int = prev
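
        # e.g. reversing 1->2->3 walks the chain once, repointing each node's
        # `next` at its predecessor, so the list becomes 3->2->1 in O(n) time
        # and O(1) extra space.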
def __lowerCamelCase ( ) -> None:
_a : List[str] = LinkedList()
assert linked_list.is_empty() is True
assert str(lowerCAmelCase_ ) == ""
try:
linked_list.delete_head()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
try:
linked_list.delete_tail()
raise AssertionError # This should not happen.
except IndexError:
assert True # This should happen.
for i in range(10 ):
assert len(lowerCAmelCase_ ) == i
linked_list.insert_nth(lowerCAmelCase_ , i + 1 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 11 ) )
linked_list.insert_head(0 )
linked_list.insert_tail(11 )
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(0 , 12 ) )
assert linked_list.delete_head() == 0
assert linked_list.delete_nth(9 ) == 10
assert linked_list.delete_tail() == 11
assert len(lowerCAmelCase_ ) == 9
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(1 , 10 ) )
assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
for i in range(0 , 9 ):
_a : Union[str, Any] = -i
assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
linked_list.reverse()
assert str(lowerCAmelCase_ ) == "->".join(str(lowerCAmelCase_ ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55_555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i)
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")
if __name__ == "__main__":
main()
| 89 | 0 |
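The reversal loop above is the textbook three-pointer in-place reversal. As a sanity check, here is a minimal standalone sketch with plain names (the `_Node` helper below is illustrative only, not part of the file above):

class _Node:
    def __init__(self, data, nxt=None):
        self.data, self.next = data, nxt

def reverse(head):
    prev = None
    current = head
    while current:
        next_node = current.next  # remember the rest of the list
        current.next = prev       # point the current node backwards
        prev = current            # the current node becomes the previous one
        current = next_node       # advance to the stored remainder
    return prev                   # prev is the new head

# build 1->2->3, reverse it, read back 3->2->1
head = reverse(_Node(1, _Node(2, _Node(3))))
out = []
while head:
    out.append(head.data)
    head = head.next
assert out == [3, 2, 1]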
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
'''configuration_swiftformer''': [
'''SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''SwiftFormerConfig''',
'''SwiftFormerOnnxConfig''',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A : str = [
'''SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwiftFormerForImageClassification''',
'''SwiftFormerModel''',
'''SwiftFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swiftformer import (
SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
SwiftFormerConfig,
SwiftFormerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swiftformer import (
SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
SwiftFormerForImageClassification,
SwiftFormerModel,
SwiftFormerPreTrainedModel,
)
else:
import sys
A : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 274 |
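The `_LazyModule` registration above defers the costly model imports until an attribute is first accessed. A stripped-down sketch of that idea, assuming the usual module-proxy mechanism (this is not the real `_LazyModule` implementation):

import importlib
import types

class _TinyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map attribute name -> submodule that defines it
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        # import the owning submodule only on first access
        module = importlib.import_module("." + self._attr_to_module[attr], self.__name__)
        return getattr(module, attr)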
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
| 89 | 0 |
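The analyzer above exercises two doctest entry points: module docstrings via `DocTestSuite` and whole files via `testfile`. A minimal standalone sketch of both (the helper names are mine):

import doctest
import unittest

def run_module_doctests(module):
    suite = doctest.DocTestSuite(module)         # collect >>> examples from docstrings
    return unittest.TextTestRunner().run(suite)  # result.failures lists any mismatches

def run_file_doctests(path):
    # treats the file itself as doctest input; ELLIPSIS lets "..." match long output
    return doctest.testfile(path, module_relative=False, optionflags=doctest.ELLIPSIS)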
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
class a__ ( _UpperCamelCase ):
A = ['pixel_values']
def __init__( self : Union[str, Any],_A : bool = True,_A : Optional[Dict[str, int]] = None,_A : PILImageResampling = PILImageResampling.BICUBIC,_A : bool = True,_A : bool = True,_A : Union[int, float] = 1 / 255,_A : Dict[str, int] = None,_A : bool = True,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[float, List[float]]] = None,**_A : int,):
"""simple docstring"""
super().__init__(**_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : int = crop_size if crop_size is not None else {'height': 224, 'width': 224}
SCREAMING_SNAKE_CASE_ : int = get_size_dict(_UpperCAmelCase,default_to_square=_UpperCAmelCase,param_name="crop_size" )
SCREAMING_SNAKE_CASE_ : Any = do_resize
SCREAMING_SNAKE_CASE_ : str = do_rescale
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_normalize
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_center_crop
SCREAMING_SNAKE_CASE_ : Any = crop_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = size
SCREAMING_SNAKE_CASE_ : Optional[int] = resample
SCREAMING_SNAKE_CASE_ : str = rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[int] = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
SCREAMING_SNAKE_CASE_ : int = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def __UpperCamelCase ( self : List[str],_A : np.ndarray,_A : Dict[str, int],_A : PILImageResampling = PILImageResampling.BILINEAR,_A : Optional[Union[str, ChannelDimension]] = None,**_A : Optional[Any],):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Dict = get_size_dict(_UpperCAmelCase )
if "shortest_edge" in size:
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_resize_output_image_size(_UpperCAmelCase,size=size["shortest_edge"],default_to_square=_UpperCAmelCase )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
SCREAMING_SNAKE_CASE_ : str = (size['height'], size['width'])
else:
raise ValueError(F'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(_UpperCAmelCase,size=_UpperCAmelCase,resample=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : List[str],_A : np.ndarray,_A : Dict[str, int],_A : Optional[Union[str, ChannelDimension]] = None,**_A : Any,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_UpperCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_UpperCAmelCase,size=(size["height"], size["width"]),data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : Optional[Any],_A : np.ndarray,_A : float,_A : Optional[Union[str, ChannelDimension]] = None,**_A : str ):
"""simple docstring"""
return rescale(_UpperCAmelCase,scale=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : List[Any],_A : np.ndarray,_A : Union[float, List[float]],_A : Union[float, List[float]],_A : Optional[Union[str, ChannelDimension]] = None,**_A : List[Any],):
"""simple docstring"""
return normalize(_UpperCAmelCase,mean=_UpperCAmelCase,std=_UpperCAmelCase,data_format=_UpperCAmelCase,**_UpperCAmelCase )
def __UpperCamelCase ( self : Optional[int],_A : ImageInput,_A : Optional[bool] = None,_A : Dict[str, int] = None,_A : PILImageResampling = None,_A : bool = None,_A : int = None,_A : Optional[bool] = None,_A : Optional[float] = None,_A : Optional[bool] = None,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[float, List[float]]] = None,_A : Optional[Union[str, TensorType]] = None,_A : Union[str, ChannelDimension] = ChannelDimension.FIRST,**_A : Any,):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : str = do_resize if do_resize is not None else self.do_resize
SCREAMING_SNAKE_CASE_ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
SCREAMING_SNAKE_CASE_ : Optional[int] = do_normalize if do_normalize is not None else self.do_normalize
SCREAMING_SNAKE_CASE_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
SCREAMING_SNAKE_CASE_ : Dict = crop_size if crop_size is not None else self.crop_size
SCREAMING_SNAKE_CASE_ : Union[str, Any] = get_size_dict(_UpperCAmelCase,param_name="crop_size",default_to_square=_UpperCAmelCase )
SCREAMING_SNAKE_CASE_ : str = resample if resample is not None else self.resample
SCREAMING_SNAKE_CASE_ : Optional[Any] = rescale_factor if rescale_factor is not None else self.rescale_factor
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_mean if image_mean is not None else self.image_mean
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_std if image_std is not None else self.image_std
SCREAMING_SNAKE_CASE_ : Union[str, Any] = size if size is not None else self.size
SCREAMING_SNAKE_CASE_ : Optional[int] = get_size_dict(_UpperCAmelCase )
if not is_batched(_UpperCAmelCase ):
SCREAMING_SNAKE_CASE_ : Tuple = [images]
if not valid_images(_UpperCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
# All transformations expect numpy arrays.
SCREAMING_SNAKE_CASE_ : int = [to_numpy_array(_UpperCAmelCase ) for image in images]
if do_resize:
SCREAMING_SNAKE_CASE_ : Dict = [self.resize(image=_UpperCAmelCase,size=_UpperCAmelCase,resample=_UpperCAmelCase ) for image in images]
if do_center_crop:
SCREAMING_SNAKE_CASE_ : Tuple = [self.center_crop(image=_UpperCAmelCase,size=_UpperCAmelCase ) for image in images]
if do_rescale:
SCREAMING_SNAKE_CASE_ : Optional[Any] = [self.rescale(image=_UpperCAmelCase,scale=_UpperCAmelCase ) for image in images]
if do_normalize:
SCREAMING_SNAKE_CASE_ : str = [self.normalize(image=_UpperCAmelCase,mean=_UpperCAmelCase,std=_UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = [to_channel_dimension_format(_UpperCAmelCase,_UpperCAmelCase ) for image in images]
SCREAMING_SNAKE_CASE_ : Optional[Any] = {'pixel_values': images}
return BatchFeature(data=_UpperCAmelCase,tensor_type=_UpperCAmelCase )
| 18 |
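The `preprocess` path above boils down to resize, center crop, rescale, and normalize over HWC arrays. A back-of-the-envelope version of the last two steps, assuming a uint8 input and the usual ImageNet statistics (restated here for illustration):

import numpy as np

def rescale_and_normalize(image: np.ndarray) -> np.ndarray:
    mean = np.array([0.485, 0.456, 0.406])  # assumed IMAGENET_DEFAULT_MEAN
    std = np.array([0.229, 0.224, 0.225])   # assumed IMAGENET_DEFAULT_STD
    pixels = image.astype(np.float32) * (1 / 255)  # rescale: uint8 -> [0, 1]
    return (pixels - mean) / std                   # normalize per channel

x = np.random.randint(0, 256, (224, 224, 3), dtype=np.uint8)
assert rescale_and_normalize(x).shape == (224, 224, 3)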
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def __lowerCamelCase ( lowerCAmelCase_ ) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
| 89 | 0 |
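`image_processor_class_from_name` above resolves a class-name string by importing the owning submodule and calling `getattr`. A rough sketch of that lookup with a two-entry stand-in table (not the full mapping; assumes transformers is installed):

import importlib

_NAME_TABLE = {"vit": "ViTImageProcessor", "clip": "CLIPImageProcessor"}

def load_processor_class(model_type: str):
    class_name = _NAME_TABLE[model_type]
    module = importlib.import_module(f".{model_type}", "transformers.models")
    return getattr(module, class_name)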
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->List[str]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Tuple:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->Union[str, Any]:
torch.manual_seed(0 )
lowerCAmelCase = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''CrossAttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''CrossAttnUpBlock2D''') , cross_attention_dim=10 , )
return model
@property
def SCREAMING_SNAKE_CASE_ ( self ) ->List[Any]:
torch.manual_seed(0 )
lowerCAmelCase = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''DownEncoderBlock2D''', '''DownEncoderBlock2D''') , up_block_types=('''UpDecoderBlock2D''', '''UpDecoderBlock2D''') , )
lowerCAmelCase = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=('''AttnDownBlock2D''', '''DownBlock2D''') , up_block_types=('''UpBlock2D''', '''AttnUpBlock2D''') , )
return vqvae, unet
@slow
def SCREAMING_SNAKE_CASE_ ( self ) ->int:
lowerCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
lowerCAmelCase = DDPMScheduler()
lowerCAmelCase = AudioDiffusionPipeline(vqvae=_UpperCAmelCase , unet=self.dummy_unet , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
lowerCAmelCase = pipe(generator=_UpperCAmelCase , steps=4 )
lowerCAmelCase = output.audios[0]
lowerCAmelCase = output.images[0]
lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
lowerCAmelCase = pipe(generator=_UpperCAmelCase , steps=4 , return_dict=_UpperCAmelCase )
lowerCAmelCase = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowerCAmelCase = np.frombuffer(image_from_tuple.tobytes() , dtype='''uint8''' )[:10]
lowerCAmelCase = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
lowerCAmelCase = DDIMScheduler()
lowerCAmelCase = self.dummy_vqvae_and_unet
lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
lowerCAmelCase = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
lowerCAmelCase = pipe(raw_audio=_UpperCAmelCase , generator=_UpperCAmelCase , start_step=5 , steps=10 )
lowerCAmelCase = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowerCAmelCase = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
lowerCAmelCase = self.dummy_unet_condition
lowerCAmelCase = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=_UpperCAmelCase , mel=_UpperCAmelCase , scheduler=_UpperCAmelCase )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
np.random.seed(0 )
lowerCAmelCase = torch.rand((1, 1, 10) )
lowerCAmelCase = pipe(generator=_UpperCAmelCase , encoding=_UpperCAmelCase )
lowerCAmelCase = output.images[0]
lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowerCAmelCase = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowercase_ ( unittest.TestCase ):
"""simple docstring"""
def SCREAMING_SNAKE_CASE_ ( self ) ->str:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def SCREAMING_SNAKE_CASE_ ( self ) ->Optional[int]:
lowerCAmelCase = torch_device
lowerCAmelCase = DiffusionPipeline.from_pretrained('''teticio/audio-diffusion-ddim-256''' )
lowerCAmelCase = pipe.to(_UpperCAmelCase )
pipe.set_progress_bar_config(disable=_UpperCAmelCase )
lowerCAmelCase = torch.Generator(device=_UpperCAmelCase ).manual_seed(42 )
lowerCAmelCase = pipe(generator=_UpperCAmelCase )
lowerCAmelCase = output.audios[0]
lowerCAmelCase = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
lowerCAmelCase = np.frombuffer(image.tobytes() , dtype='''uint8''' )[:10]
lowerCAmelCase = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 338 |
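The assertions above hinge on seeded `torch.Generator` objects making the pipeline reproducible. A minimal sketch of that determinism contract, with the pipeline call replaced by a plain `torch.randn`:

import torch

def sample(seed: int) -> torch.Tensor:
    generator = torch.Generator(device="cpu").manual_seed(seed)
    return torch.randn(4, generator=generator)

# the same seed must reproduce the same draw, which is what the tests rely on
assert torch.equal(sample(42), sample(42))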
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126, minus "|i1", whose values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
| 89 | 0 |
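`encode_np_array`/`encode_pil_image` above serialize images into a `{"bytes", "path"}` struct. A minimal roundtrip showing the bytes side with Pillow (assumes Pillow and NumPy are installed):

from io import BytesIO

import numpy as np
import PIL.Image

array = np.zeros((8, 8, 3), dtype=np.uint8)
buffer = BytesIO()
PIL.Image.fromarray(array).save(buffer, format="PNG")  # array -> PNG bytes
encoded = {"bytes": buffer.getvalue(), "path": None}

decoded = PIL.Image.open(BytesIO(encoded["bytes"]))    # PNG bytes -> image
assert np.array_equal(np.array(decoded), array)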
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Union[str, Any] = ['OwlViTFeatureExtractor']
__snake_case : str = ['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__snake_case : Any = [
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__snake_case : Union[str, Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 269 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    # Merge two minterm strings that differ in exactly one bit, replacing that
    # bit with '_'; return False if they differ in more than one position.
    lista = list(string1)
    listb = list(string2)
    count = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count += 1
            lista[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(lista)
def check(binary: list[str]) -> list[str]:
    pi = []
    while True:
        checka = ['$'] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                if k is False:
                    checka[i] = '*'
                    checka[j] = '*'
                    temp.append('X')
        for i in range(len(binary)):
            if checka[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))
def decimal_to_binary(no_of_variable: int, minterms: Sequence[float]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
def is_for_table(string1: str, string2: str, count: int) -> bool:
    # True when the two strings differ in exactly `count` positions
    # (i.e. only at the '_' wildcards of the prime implicant).
    lista = list(string1)
    listb = list(string2)
    count_n = 0
    for i in range(len(lista)):
        if lista[i] != listb[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    temp = []
    select = [0] * len(chart)
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    chart = [[0 for x in range(len(binary))] for x in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count('_')
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input('Enter the no. of variables\n'))
    minterms = [
        float(x)
        for x in input(
            'Enter the decimal representation of Minterms \'Spaces Separated\'\n').split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print('Prime Implicants are:')
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print('Essential Prime Implicants are:')
    print(essential_prime_implicants)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    main()
| 89 | 0 |
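A hand-checked micro-example of the two core steps above, using `format` instead of the manual div/mod loop:

def to_binary(no_of_variables, minterms):
    # fixed-width binary strings, e.g. 1 -> "001" for three variables
    return [format(m, f"0{no_of_variables}b") for m in minterms]

assert to_binary(3, [1, 3]) == ["001", "011"]
# "001" and "011" differ only in the middle bit, so compare_string merges
# them into "0_1"; terms that never merge with anything are prime implicants.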
__lowerCamelCase : Union[str, Any] = """
# Installazione di Transformers
! pip install transformers datasets
# Per installare dalla fonte invece dell\'ultima versione rilasciata, commenta il comando sopra e
# rimuovi la modalità commento al comando seguente.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__lowerCamelCase : List[str] = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__lowerCamelCase : str = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 52 |
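The placeholder table above is presumably substituted into documentation snippets before they are rendered as notebooks. A naive sketch of that substitution (the template string below is made up for illustration):

template = "processor = {processor_class}.from_pretrained(checkpoint)"
for placeholder, value in {"{processor_class}": "FakeProcessorClass"}.items():
    template = template.replace(placeholder, value)
assert template == "processor = FakeProcessorClass.from_pretrained(checkpoint)"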
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
__lowerCamelCase : str = logging.getLogger(__name__)
@dataclass
class __snake_case :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
@dataclass
class __snake_case :
lowerCAmelCase_ = 42
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
lowerCAmelCase_ = None
class __snake_case ( _UpperCamelCase ):
lowerCAmelCase_ = 'train'
lowerCAmelCase_ = 'dev'
lowerCAmelCase_ = 'test'
class __snake_case :
@staticmethod
def __a ( _lowercase : List[str] , _lowercase : Union[Split, str] ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __a ( _lowercase : str ):
"""simple docstring"""
raise NotImplementedError
@staticmethod
def __a ( _lowercase : List[InputExample] , _lowercase : List[str] , _lowercase : int , _lowercase : PreTrainedTokenizer , _lowercase : Dict=False , _lowercase : int="[CLS]" , _lowercase : List[Any]=1 , _lowercase : Optional[int]="[SEP]" , _lowercase : Any=False , _lowercase : int=False , _lowercase : Tuple=0 , _lowercase : Any=0 , _lowercase : int=-1_00 , _lowercase : List[Any]=0 , _lowercase : str=True , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = {label: i for i, label in enumerate(_UpperCAmelCase )}
SCREAMING_SNAKE_CASE__ = []
for ex_index, example in enumerate(_UpperCAmelCase ):
if ex_index % 1_00_00 == 0:
logger.info("""Writing example %d of %d""" , _UpperCAmelCase , len(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for word, label in zip(example.words , example.labels ):
SCREAMING_SNAKE_CASE__ = tokenizer.tokenize(_UpperCAmelCase )
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when calling tokenize with just a space.
if len(_UpperCAmelCase ) > 0:
tokens.extend(_UpperCAmelCase )
# Use the real label id for the first token of the word, and padding ids for the remaining tokens
label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(_UpperCAmelCase ) - 1) )
# Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
SCREAMING_SNAKE_CASE__ = tokenizer.num_special_tokens_to_add()
if len(_UpperCAmelCase ) > max_seq_length - special_tokens_count:
SCREAMING_SNAKE_CASE__ = tokens[: (max_seq_length - special_tokens_count)]
SCREAMING_SNAKE_CASE__ = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
tokens += [sep_token]
label_ids += [pad_token_label_id]
if sep_token_extra:
# roberta uses an extra separator b/w pairs of sentences
tokens += [sep_token]
label_ids += [pad_token_label_id]
SCREAMING_SNAKE_CASE__ = [sequence_a_segment_id] * len(_UpperCAmelCase )
if cls_token_at_end:
tokens += [cls_token]
label_ids += [pad_token_label_id]
segment_ids += [cls_token_segment_id]
else:
SCREAMING_SNAKE_CASE__ = [cls_token] + tokens
SCREAMING_SNAKE_CASE__ = [pad_token_label_id] + label_ids
SCREAMING_SNAKE_CASE__ = [cls_token_segment_id] + segment_ids
SCREAMING_SNAKE_CASE__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
# The mask has 1 for real tokens and 0 for padding tokens. Only real
# tokens are attended to.
SCREAMING_SNAKE_CASE__ = [1 if mask_padding_with_zero else 0] * len(_UpperCAmelCase )
# Zero-pad up to the sequence length.
SCREAMING_SNAKE_CASE__ = max_seq_length - len(_UpperCAmelCase )
if pad_on_left:
SCREAMING_SNAKE_CASE__ = ([pad_token] * padding_length) + input_ids
SCREAMING_SNAKE_CASE__ = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
SCREAMING_SNAKE_CASE__ = ([pad_token_segment_id] * padding_length) + segment_ids
SCREAMING_SNAKE_CASE__ = ([pad_token_label_id] * padding_length) + label_ids
else:
input_ids += [pad_token] * padding_length
input_mask += [0 if mask_padding_with_zero else 1] * padding_length
segment_ids += [pad_token_segment_id] * padding_length
label_ids += [pad_token_label_id] * padding_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
assert len(_UpperCAmelCase ) == max_seq_length
if ex_index < 5:
logger.info("""*** Example ***""" )
logger.info("""guid: %s""" , example.guid )
logger.info("""tokens: %s""" , """ """.join([str(_UpperCAmelCase ) for x in tokens] ) )
logger.info("""input_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in input_ids] ) )
logger.info("""input_mask: %s""" , """ """.join([str(_UpperCAmelCase ) for x in input_mask] ) )
logger.info("""segment_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in segment_ids] ) )
logger.info("""label_ids: %s""" , """ """.join([str(_UpperCAmelCase ) for x in label_ids] ) )
if "token_type_ids" not in tokenizer.model_input_names:
SCREAMING_SNAKE_CASE__ = None
features.append(
InputFeatures(
input_ids=_UpperCAmelCase , attention_mask=_UpperCAmelCase , token_type_ids=_UpperCAmelCase , label_ids=_UpperCAmelCase ) )
return features
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
class __snake_case ( _UpperCamelCase ):
lowerCAmelCase_ = 42
lowerCAmelCase_ = nn.CrossEntropyLoss().ignore_index
def __init__( self : int , _lowercase : TokenClassificationTask , _lowercase : str , _lowercase : PreTrainedTokenizer , _lowercase : List[str] , _lowercase : str , _lowercase : Optional[int] = None , _lowercase : List[Any]=False , _lowercase : Split = Split.train , ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = os.path.join(
_UpperCAmelCase , """cached_{}_{}_{}""".format(mode.value , tokenizer.__class__.__name__ , str(_UpperCAmelCase ) ) , )
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
SCREAMING_SNAKE_CASE__ = cached_features_file + '.lock'
with FileLock(_UpperCAmelCase ):
if os.path.exists(_UpperCAmelCase ) and not overwrite_cache:
logger.info(f"""Loading features from cached file {cached_features_file}""" )
SCREAMING_SNAKE_CASE__ = torch.load(_UpperCAmelCase )
else:
logger.info(f"""Creating features from dataset file at {data_dir}""" )
SCREAMING_SNAKE_CASE__ = token_classification_task.read_examples_from_file(_UpperCAmelCase , _UpperCAmelCase )
# TODO clean up all this to leverage built-in features of tokenizers
SCREAMING_SNAKE_CASE__ = token_classification_task.convert_examples_to_features(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , cls_token_at_end=bool(model_type in ["""xlnet"""] ) , cls_token=tokenizer.cls_token , cls_token_segment_id=2 if model_type in ["""xlnet"""] else 0 , sep_token=tokenizer.sep_token , sep_token_extra=_UpperCAmelCase , pad_on_left=bool(tokenizer.padding_side == """left""" ) , pad_token=tokenizer.pad_token_id , pad_token_segment_id=tokenizer.pad_token_type_id , pad_token_label_id=self.pad_token_label_id , )
logger.info(f"""Saving features into cached file {cached_features_file}""" )
torch.save(self.features , _UpperCAmelCase )
def __len__( self : Any ):
"""simple docstring"""
return len(self.features )
def __getitem__( self : Tuple , _lowercase : Dict ):
"""simple docstring"""
return self.features[i]
if is_tf_available():
    import tensorflow as tf

    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100

        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples,
                labels,
                max_seq_length,
                tokenizer,
                cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token,
                cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token,
                sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"),
                pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id,
                pad_token_label_id=self.pad_token_label_id,
            )

            def gen():
                for ex in self.features:
                    if ex.token_type_ids is None:
                        yield (
                            {"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
                            ex.label_ids,
                        )
                    else:
                        yield (
                            {
                                "input_ids": ex.input_ids,
                                "attention_mask": ex.attention_mask,
                                "token_type_ids": ex.token_type_ids,
                            },
                            ex.label_ids,
                        )

            if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
            else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )

        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
            return self.dataset

        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
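
# Hedged usage sketch (not part of the original module): the task object, data
# directory, and label list below are placeholders you would have to supply.
#
#     from transformers import AutoTokenizer
#
#     tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
#     task = MyNerTask()  # hypothetical TokenClassificationTask subclass
#     dataset = TokenClassificationDataset(
#         token_classification_task=task,
#         data_dir="./data",
#         tokenizer=tokenizer,
#         labels=["O", "B-PER", "I-PER"],
#         model_type="bert",
#         max_seq_length=128,
#     )
#     print(len(dataset), dataset[0].input_ids[:10])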
| 219 |
import os
import unittest

from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_intend(self):
        pass
| 89 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
_UpperCamelCase: Tuple = namedtuple('covid_data', 'cases deaths recovered')
def lowercase__ ( _UpperCAmelCase = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
'''simple docstring'''
lowercase : Optional[int] = '//div[@class = "maincounter-number"]/span/text()'
return covid_data(*html.fromstring(requests.get(lowerCAmelCase_ ).content ).xpath(lowerCAmelCase_ ) )
_UpperCamelCase: Optional[int] = 'Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats()))
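
# Minimal, self-contained sketch of the same XPath extraction pattern on static
# HTML (no network access; the markup below is invented for illustration):
#
#     from lxml import html
#
#     page = html.fromstring(
#         '<div class="maincounter-number"><span>1,000</span></div>'
#         '<div class="maincounter-number"><span>20</span></div>'
#         '<div class="maincounter-number"><span>900</span></div>'
#     )
#     assert page.xpath('//div[@class = "maincounter-number"]/span/text()') == ["1,000", "20", "900"]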
| 255 |
import copy
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING


logger = logging.get_logger(__name__)

CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/conditional-detr-resnet-50": (
        "https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json"
    ),
}


class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
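
# Hedged usage sketch (assumes the `transformers` package is installed; with the
# restored class above this mirrors how the published config behaves):
#
#     config = ConditionalDetrConfig(d_model=128, encoder_layers=2, decoder_layers=2)
#     print(config.hidden_size)          # 128, aliased to d_model via attribute_map
#     print(config.num_attention_heads)  # 8, aliased to encoder_attention_heads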
| 89 | 0 |
from math import ceil
from typing import List, Optional, Union

import numpy as np

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging


logger = logging.get_logger(__name__)


class TvltFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a TVLT audio feature extractor that turns raw waveforms into padded log-mel spectrogram patches.
    """

    model_input_names = ["audio_values", "audio_mask"]

    def __init__(
        self,
        spectrogram_length=2048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44100,
        hop_length_to_sampling_rate=86,
        n_fft=2048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )

        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        ).T

    def _np_extract_fbank_features(self, waveform):
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel="dB",
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    "This feature extractor is set to support sampling rate"
                    f" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"
                    f" with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze()).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list):
            audio_features = [np.asarray(feature, dtype=np.float32) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len for feature in audio_features]
        )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0]) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask).astype(np.float32)

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features), 1, max_time_len, self.feature_size]).astype(np.float32)
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features)):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {"audio_values": padded_audio_features, "audio_mask": audio_mask}
        else:
            data = {"audio_values": padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors)
        return encoded_inputs
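
# Hedged usage sketch (one second of random noise stands in for real audio):
#
#     import numpy as np
#
#     extractor = TvltFeatureExtractor()
#     waveform = np.random.randn(44100).astype(np.float32)
#     out = extractor(waveform, sampling_rate=44100, return_tensors="np")
#     print(out["audio_values"].shape, out["audio_mask"].shape)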
| 311 |
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST


if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
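
# Hedged usage sketch mirroring the integration test above outside of unittest
# (assumes network access to the Hugging Face hub and an image file of your own):
#
#     from transformers import pipeline
#
#     classifier = pipeline("image-classification", model="facebook/convnextv2-tiny-1k-224")
#     print(classifier("cats.png")[0])  # e.g. a dict with "label" and "score" keys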
| 89 | 0 |
import json
import os
import unittest

from transformers import BatchEncoding, LEDTokenizer, LEDTokenizerFast
from transformers.models.led.tokenization_led import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.utils import cached_property

from ...test_tokenization_common import TokenizerTesterMixin


@require_tokenizers
class TestTokenizationLED(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LEDTokenizer
    rust_tokenizer_class = LEDTokenizerFast
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()
        vocab = [
            "l",
            "o",
            "w",
            "e",
            "r",
            "s",
            "t",
            "i",
            "d",
            "n",
            "\u0120",
            "\u0120l",
            "\u0120n",
            "\u0120lo",
            "\u0120low",
            "er",
            "\u0120lowest",
            "\u0120newer",
            "\u0120wider",
            "<unk>",
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return self.rust_tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        return "lower newer", "lower newer"

    @cached_property
    def default_tokenizer(self):
        return LEDTokenizer.from_pretrained("allenai/led-base-16384")

    @cached_property
    def default_tokenizer_fast(self):
        return LEDTokenizerFast.from_pretrained("allenai/led-base-16384")

    @require_torch
    def test_prepare_batch(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [0, 250, 251, 17818, 13, 39186, 1938, 4, 2]

        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, max_length=len(expected_src_tokens), padding=True, return_tensors="pt")
            self.assertIsInstance(batch, BatchEncoding)

            self.assertEqual((2, 9), batch.input_ids.shape)
            self.assertEqual((2, 9), batch.attention_mask.shape)
            result = batch.input_ids.tolist()[0]
            self.assertListEqual(expected_src_tokens, result)

    @require_torch
    def test_prepare_batch_empty_target_text(self):
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(src_text, padding=True, return_tensors="pt")
            self.assertIn("input_ids", batch)
            self.assertIn("attention_mask", batch)
            self.assertNotIn("labels", batch)
            self.assertNotIn("decoder_attention_mask", batch)

    @require_torch
    def test_tokenizer_as_target_length(self):
        tgt_text = [
            "Summary of the text.",
            "Another summary.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            targets = tokenizer(text_target=tgt_text, max_length=32, padding="max_length", return_tensors="pt")
            self.assertEqual(32, targets["input_ids"].shape[1])

    @require_torch
    def test_prepare_batch_not_longer_than_maxlen(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            batch = tokenizer(
                ["I am a small frog" * 1024, "I am a small frog"], padding=True, truncation=True, return_tensors="pt"
            )
            self.assertIsInstance(batch, BatchEncoding)
            self.assertEqual(batch.input_ids.shape, (2, 5122))

    @require_torch
    def test_special_tokens(self):
        src_text = ["A long paragraph for summarization."]
        tgt_text = [
            "Summary of the text.",
        ]
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            inputs = tokenizer(src_text, return_tensors="pt")
            targets = tokenizer(text_target=tgt_text, return_tensors="pt")
            input_ids = inputs["input_ids"]
            labels = targets["input_ids"]
            self.assertTrue((input_ids[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((labels[:, 0] == tokenizer.bos_token_id).all().item())
            self.assertTrue((input_ids[:, -1] == tokenizer.eos_token_id).all().item())
            self.assertTrue((labels[:, -1] == tokenizer.eos_token_id).all().item())

    @require_torch
    def test_global_attention_mask(self):
        for tokenizer in [self.default_tokenizer, self.default_tokenizer_fast]:
            src_text = ["Summary of the text.", "Another summary."]
            expected_global_attention_mask = [[0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, -1, -1]]

            encoded_output = tokenizer(src_text, padding=False)
            encoded_output["global_attention_mask"] = [[0] * len(x) for x in encoded_output["input_ids"]]
            outputs = tokenizer.pad(encoded_output)
            self.assertSequenceEqual(outputs["global_attention_mask"], expected_global_attention_mask)

    def test_pretokenized_inputs(self):
        pass

    def test_embeded_special_tokens(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = "A, <mask> AllenNLP sentence."
                tokens_r = tokenizer_r.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)
                tokens_p = tokenizer_p.encode_plus(sentence, add_special_tokens=True, return_token_type_ids=True)

                self.assertEqual(sum(tokens_r["token_type_ids"]), sum(tokens_p["token_type_ids"]))
                self.assertEqual(
                    sum(tokens_r["attention_mask"]) / len(tokens_r["attention_mask"]),
                    sum(tokens_p["attention_mask"]) / len(tokens_p["attention_mask"]),
                )

                tokens_r_str = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"])
                tokens_p_str = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"])
                self.assertSequenceEqual(tokens_p["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])
                self.assertSequenceEqual(tokens_r["input_ids"], [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2])

                self.assertSequenceEqual(
                    tokens_p_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
                self.assertSequenceEqual(
                    tokens_r_str, ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"]
                )
| 152 |
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_lilt": ["LILT_PRETRAINED_CONFIG_ARCHIVE_MAP", "LiltConfig"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_lilt"] = [
        "LILT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LiltForQuestionAnswering",
        "LiltForSequenceClassification",
        "LiltForTokenClassification",
        "LiltModel",
        "LiltPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_lilt import (
            LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
            LiltForQuestionAnswering,
            LiltForSequenceClassification,
            LiltForTokenClassification,
            LiltModel,
            LiltPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
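
# Minimal, self-contained sketch of the lazy-import idea that the _LazyModule
# helper implements, using plain PEP 562 module-level __getattr__ (illustrative
# only; the real _LazyModule also handles submodules and missing dependencies):
#
#     # mypkg/__init__.py
#     import importlib
#
#     _lazy_attrs = {"LiltConfig": ".configuration_lilt"}
#
#     def __getattr__(name):
#         if name in _lazy_attrs:
#             module = importlib.import_module(_lazy_attrs[name], __name__)
#             return getattr(module, name)
#         raise AttributeError(f"module {__name__!r} has no attribute {name!r}")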
| 89 | 0 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union

import numpy as np


def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """Helper function to read an audio file through ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio


def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """Helper function to read raw microphone data via ffmpeg."""
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item


def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """Yields overlapping microphone chunks as numpy arrays, ready for streaming inference."""
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item


def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """Reads raw bytes from an iterator and cuts them into chunks of length `chunk_len`, with optional stride overlap."""
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item


def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """Internal function to create the generator of data through ffmpeg."""
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
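
if __name__ == "__main__":
    # Self-contained demonstration of chunk_bytes_iter on synthetic bytes, no
    # ffmpeg needed: an 8-byte payload is cut into 4-byte windows with a 1-byte
    # stride on each side.
    for chunk in chunk_bytes_iter(iter([b"abcdefgh"]), chunk_len=4, stride=(1, 1)):
        print(chunk["raw"], chunk["stride"])
    # Prints:
    #   b'abcd' (0, 1)
    #   b'cdef' (1, 1)
    #   b'efgh' (1, 1)
    #   b'gh' (1, 0)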
| 143 |
import math


def check_partition_perfect(positive_integer: int) -> bool:
    """Return True if sqrt(4 * positive_integer + 1) / 2 + 1 / 2 is an exact power of two."""
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)

    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    """Find the smallest partition value for which the proportion of perfect partitions drops below max_proportion."""
    total_partitions = 0
    perfect_partitions = 0

    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)

            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1


if __name__ == "__main__":
    print(f"{solution() = }")
| 89 | 0 |
from typing import List, Optional, Tuple

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"
    },
    "merges_file": {
        "allegro/herbert-base-cased": "https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"allegro/herbert-base-cased": 514}
PRETRAINED_INIT_CONFIGURATION = {}


class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        sep_token="</s>",
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            sep_token=sep_token,
            **kwargs,
        )

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep

        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]

        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
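
# Hedged usage sketch (downloads the pretrained files from the hub on first use):
#
#     tok = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#     ids = tok.build_inputs_with_special_tokens([10, 11], [12])
#     # -> [cls_id, 10, 11, sep_id, 12, sep_id]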
| 39 |
import argparse
import json

import torch

from diffusers import DDPMScheduler, LDMPipeline, UNet2DModel, VQModel


def shave_segments(path, n_shave_prefix_segments=1):
    """Removes segments. Positive values shave the first segments, negative shave the last segments."""
    if n_shave_prefix_segments >= 0:
        return ".".join(path.split(".")[n_shave_prefix_segments:])
    else:
        return ".".join(path.split(".")[:n_shave_prefix_segments])


def renew_resnet_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside resnets to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item.replace("in_layers.0", "norm1")
        new_item = new_item.replace("in_layers.2", "conv1")

        new_item = new_item.replace("out_layers.0", "norm2")
        new_item = new_item.replace("out_layers.3", "conv2")

        new_item = new_item.replace("emb_layers.1", "time_emb_proj")
        new_item = new_item.replace("skip_connection", "conv_shortcut")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def renew_attention_paths(old_list, n_shave_prefix_segments=0):
    """Updates paths inside attentions to the new naming scheme (local renaming)."""
    mapping = []
    for old_item in old_list:
        new_item = old_item

        new_item = new_item.replace("norm.weight", "group_norm.weight")
        new_item = new_item.replace("norm.bias", "group_norm.bias")

        new_item = new_item.replace("proj_out.weight", "proj_attn.weight")
        new_item = new_item.replace("proj_out.bias", "proj_attn.bias")

        new_item = shave_segments(new_item, n_shave_prefix_segments=n_shave_prefix_segments)

        mapping.append({"old": old_item, "new": new_item})

    return mapping


def assign_to_checkpoint(
    paths, checkpoint, old_checkpoint, attention_paths_to_split=None, additional_replacements=None, config=None
):
    """Applies the global renaming to locally converted weights and assigns them to the new checkpoint, splitting attention layers where required."""
    assert isinstance(paths, list), "Paths should be a list of dicts containing 'old' and 'new' keys."

    # Splits the attention layers into three variables.
    if attention_paths_to_split is not None:
        for path, path_map in attention_paths_to_split.items():
            old_tensor = old_checkpoint[path]
            channels = old_tensor.shape[0] // 3

            target_shape = (-1, channels) if len(old_tensor.shape) == 3 else (-1)

            num_heads = old_tensor.shape[0] // config["num_head_channels"] // 3

            old_tensor = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:])
            query, key, value = old_tensor.split(channels // num_heads, dim=1)

            checkpoint[path_map["query"]] = query.reshape(target_shape)
            checkpoint[path_map["key"]] = key.reshape(target_shape)
            checkpoint[path_map["value"]] = value.reshape(target_shape)

    for path in paths:
        new_path = path["new"]

        # These have already been assigned
        if attention_paths_to_split is not None and new_path in attention_paths_to_split:
            continue

        # Global renaming happens here
        new_path = new_path.replace("middle_block.0", "mid_block.resnets.0")
        new_path = new_path.replace("middle_block.1", "mid_block.attentions.0")
        new_path = new_path.replace("middle_block.2", "mid_block.resnets.1")

        if additional_replacements is not None:
            for replacement in additional_replacements:
                new_path = new_path.replace(replacement["old"], replacement["new"])

        # proj_attn.weight has to be converted from conv 1D to linear
        if "proj_attn.weight" in new_path:
            checkpoint[new_path] = old_checkpoint[path["old"]][:, :, 0]
        else:
            checkpoint[new_path] = old_checkpoint[path["old"]]


def convert_ldm_checkpoint(checkpoint, config):
    """Takes the original LDM UNet state dict plus its config and returns a diffusers-compatible checkpoint."""
    new_checkpoint = {}

    new_checkpoint["time_embedding.linear_1.weight"] = checkpoint["time_embed.0.weight"]
    new_checkpoint["time_embedding.linear_1.bias"] = checkpoint["time_embed.0.bias"]
    new_checkpoint["time_embedding.linear_2.weight"] = checkpoint["time_embed.2.weight"]
    new_checkpoint["time_embedding.linear_2.bias"] = checkpoint["time_embed.2.bias"]

    new_checkpoint["conv_in.weight"] = checkpoint["input_blocks.0.0.weight"]
    new_checkpoint["conv_in.bias"] = checkpoint["input_blocks.0.0.bias"]

    new_checkpoint["conv_norm_out.weight"] = checkpoint["out.0.weight"]
    new_checkpoint["conv_norm_out.bias"] = checkpoint["out.0.bias"]
    new_checkpoint["conv_out.weight"] = checkpoint["out.2.weight"]
    new_checkpoint["conv_out.bias"] = checkpoint["out.2.bias"]

    # Retrieves the keys for the input blocks only
    num_input_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "input_blocks" in layer})
    input_blocks = {
        layer_id: [key for key in checkpoint if f"input_blocks.{layer_id}" in key]
        for layer_id in range(num_input_blocks)
    }

    # Retrieves the keys for the middle blocks only
    num_middle_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "middle_block" in layer})
    middle_blocks = {
        layer_id: [key for key in checkpoint if f"middle_block.{layer_id}" in key]
        for layer_id in range(num_middle_blocks)
    }

    # Retrieves the keys for the output blocks only
    num_output_blocks = len({".".join(layer.split(".")[:2]) for layer in checkpoint if "output_blocks" in layer})
    output_blocks = {
        layer_id: [key for key in checkpoint if f"output_blocks.{layer_id}" in key]
        for layer_id in range(num_output_blocks)
    }

    for i in range(1, num_input_blocks):
        block_id = (i - 1) // (config["num_res_blocks"] + 1)
        layer_in_block_id = (i - 1) % (config["num_res_blocks"] + 1)

        resnets = [key for key in input_blocks[i] if f"input_blocks.{i}.0" in key]
        attentions = [key for key in input_blocks[i] if f"input_blocks.{i}.1" in key]

        if f"input_blocks.{i}.0.op.weight" in checkpoint:
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.weight"] = checkpoint[
                f"input_blocks.{i}.0.op.weight"
            ]
            new_checkpoint[f"down_blocks.{block_id}.downsamplers.0.conv.bias"] = checkpoint[
                f"input_blocks.{i}.0.op.bias"
            ]
            continue

        paths = renew_resnet_paths(resnets)
        meta_path = {"old": f"input_blocks.{i}.0", "new": f"down_blocks.{block_id}.resnets.{layer_in_block_id}"}
        resnet_op = {"old": "resnets.2.op", "new": "downsamplers.0.op"}
        assign_to_checkpoint(
            paths, new_checkpoint, checkpoint, additional_replacements=[meta_path, resnet_op], config=config
        )

        if len(attentions):
            paths = renew_attention_paths(attentions)
            meta_path = {
                "old": f"input_blocks.{i}.1",
                "new": f"down_blocks.{block_id}.attentions.{layer_in_block_id}",
            }
            to_split = {
                f"input_blocks.{i}.1.qkv.bias": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                },
                f"input_blocks.{i}.1.qkv.weight": {
                    "key": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                    "query": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                    "value": f"down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                },
            }
            assign_to_checkpoint(
                paths,
                new_checkpoint,
                checkpoint,
                additional_replacements=[meta_path],
                attention_paths_to_split=to_split,
                config=config,
            )

    resnet_0 = middle_blocks[0]
    attentions = middle_blocks[1]
    resnet_1 = middle_blocks[2]

    resnet_0_paths = renew_resnet_paths(resnet_0)
    assign_to_checkpoint(resnet_0_paths, new_checkpoint, checkpoint, config=config)

    resnet_1_paths = renew_resnet_paths(resnet_1)
    assign_to_checkpoint(resnet_1_paths, new_checkpoint, checkpoint, config=config)

    attentions_paths = renew_attention_paths(attentions)
    to_split = {
        "middle_block.1.qkv.bias": {
            "key": "mid_block.attentions.0.key.bias",
            "query": "mid_block.attentions.0.query.bias",
            "value": "mid_block.attentions.0.value.bias",
        },
        "middle_block.1.qkv.weight": {
            "key": "mid_block.attentions.0.key.weight",
            "query": "mid_block.attentions.0.query.weight",
            "value": "mid_block.attentions.0.value.weight",
        },
    }
    assign_to_checkpoint(
        attentions_paths, new_checkpoint, checkpoint, attention_paths_to_split=to_split, config=config
    )

    for i in range(num_output_blocks):
        block_id = i // (config["num_res_blocks"] + 1)
        layer_in_block_id = i % (config["num_res_blocks"] + 1)
        output_block_layers = [shave_segments(name, 2) for name in output_blocks[i]]
        output_block_list = {}

        for layer in output_block_layers:
            layer_id, layer_name = layer.split(".")[0], shave_segments(layer, 1)
            if layer_id in output_block_list:
                output_block_list[layer_id].append(layer_name)
            else:
                output_block_list[layer_id] = [layer_name]

        if len(output_block_list) > 1:
            resnets = [key for key in output_blocks[i] if f"output_blocks.{i}.0" in key]
            attentions = [key for key in output_blocks[i] if f"output_blocks.{i}.1" in key]

            resnet_0_paths = renew_resnet_paths(resnets)
            paths = renew_resnet_paths(resnets)

            meta_path = {"old": f"output_blocks.{i}.0", "new": f"up_blocks.{block_id}.resnets.{layer_in_block_id}"}
            assign_to_checkpoint(
                paths, new_checkpoint, checkpoint, additional_replacements=[meta_path], config=config
            )

            if ["conv.weight", "conv.bias"] in output_block_list.values():
                index = list(output_block_list.values()).index(["conv.weight", "conv.bias"])
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.weight"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.weight"
                ]
                new_checkpoint[f"up_blocks.{block_id}.upsamplers.0.conv.bias"] = checkpoint[
                    f"output_blocks.{i}.{index}.conv.bias"
                ]

                # Clear attentions as they have been attributed above.
                if len(attentions) == 2:
                    attentions = []

            if len(attentions):
                paths = renew_attention_paths(attentions)
                meta_path = {
                    "old": f"output_blocks.{i}.1",
                    "new": f"up_blocks.{block_id}.attentions.{layer_in_block_id}",
                }
                to_split = {
                    f"output_blocks.{i}.1.qkv.bias": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias",
                    },
                    f"output_blocks.{i}.1.qkv.weight": {
                        "key": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight",
                        "query": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight",
                        "value": f"up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight",
                    },
                }
                assign_to_checkpoint(
                    paths,
                    new_checkpoint,
                    checkpoint,
                    additional_replacements=[meta_path],
                    attention_paths_to_split=to_split if any("qkv" in key for key in attentions) else None,
                    config=config,
                )
        else:
            resnet_0_paths = renew_resnet_paths(output_blocks[i], n_shave_prefix_segments=1)
            for path in resnet_0_paths:
                old_path = ".".join(["output_blocks", str(i), path["old"]])
                new_path = ".".join(["up_blocks", str(block_id), "resnets", str(layer_in_block_id), path["new"]])

                new_checkpoint[new_path] = checkpoint[old_path]

    return new_checkpoint


if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )

    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the architecture.",
    )

    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")

    args = parser.parse_args()

    checkpoint = torch.load(args.checkpoint_path)

    with open(args.config_file) as f:
        config = json.loads(f.read())

    converted_checkpoint = convert_ldm_checkpoint(checkpoint, config)

    if "ldm" in config:
        del config["ldm"]

    model = UNet2DModel(**config)
    model.load_state_dict(converted_checkpoint)

    try:
        scheduler = DDPMScheduler.from_config("/".join(args.checkpoint_path.split("/")[:-1]))
        vqvae = VQModel.from_pretrained("/".join(args.checkpoint_path.split("/")[:-1]))

        pipe = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
        pipe.save_pretrained(args.dump_path)
    except:  # noqa: E722
        model.save_pretrained(args.dump_path)
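
# Illustrative check of the local-renaming helpers (kept as a comment because
# the module parses CLI arguments when executed):
#
#     renew_resnet_paths(["input_blocks.1.0.in_layers.0.weight"])
#     # -> [{'old': 'input_blocks.1.0.in_layers.0.weight',
#     #      'new': 'input_blocks.1.0.norm1.weight'}]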
| 89 | 0 |
from abc import ABC, abstractmethod
from typing import Optional, Union

from .. import Dataset, DatasetDict, Features, IterableDataset, IterableDatasetDict, NamedSplit
from ..utils.typing import NestedDataStructureLike, PathLike


class AbstractDatasetReader(ABC):
    def __init__(
        self,
        path_or_paths: Optional[NestedDataStructureLike[PathLike]] = None,
        split: Optional[NamedSplit] = None,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.path_or_paths = path_or_paths
        self.split = split if split or isinstance(path_or_paths, dict) else "train"
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, DatasetDict, IterableDataset, IterableDatasetDict]:
        pass


class AbstractDatasetInputStream(ABC):
    def __init__(
        self,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        self.features = features
        self.cache_dir = cache_dir
        self.keep_in_memory = keep_in_memory
        self.streaming = streaming
        self.num_proc = num_proc
        self.kwargs = kwargs

    @abstractmethod
    def read(self) -> Union[Dataset, IterableDataset]:
        pass
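
# Hedged sketch of a concrete reader built on the abstract base above; the
# in-memory "source" is invented for illustration (real readers such as the CSV
# or JSON ones drive an Arrow dataset builder instead):
#
#     class InMemoryListReader(AbstractDatasetReader):
#         def __init__(self, rows, **kwargs):
#             super().__init__(path_or_paths=None, **kwargs)
#             self.rows = rows
#
#         def read(self):
#             return Dataset.from_list(self.rows)
#
#     ds = InMemoryListReader([{"text": "hello"}, {"text": "world"}]).read()
#     print(len(ds))  # 2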
| 274 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
_a : Optional[int] = f"""{sampling_rate}"""
_a : Any = '1'
_a : Optional[int] = 'f32le'
_a : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_a : int = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_a : int = output_stream[0]
_a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item
def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s
    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]
    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item
def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item
def _ffmpeg_stream(ffmpeg_command, buflen: int):
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
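

# A small, hedged self-check of the striding logic in chunk_bytes_iter above:
# ten one-byte reads re-chunked with chunk_len=4 and stride=(1, 1), so
# consecutive windows overlap by stride_left + stride_right bytes — the same
# mechanism ffmpeg_microphone_live uses on raw microphone bytes.
if __name__ == "__main__":
    byte_source = (bytes([i]) for i in range(10))
    for chunk in chunk_bytes_iter(byte_source, 4, stride=(1, 1)):
        print(chunk["raw"], chunk["stride"])
    # expected windows: bytes 0-3 (0, 1), 2-5 (1, 1), 4-7 (1, 1), 6-9 (1, 1),
    # then the leftover tail 8-9 with stride (1, 0)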
| 89 | 0 |
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"
def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config
def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )
    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser
def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file
    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")
def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)
if __name__ == "__main__":
main()
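
# Hedged usage notes for the command above: `accelerate config` is the real
# CLI entry point; the concrete file name below is a hypothetical example.
#
#   accelerate config                        # interactive questionnaire
#   accelerate config --config_file my.yaml  # save answers to a custom path
#
# Without --config_file, answers are written to default_yaml_config_file
# (typically ~/.cache/huggingface/accelerate/default_config.yaml).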
| 18 |
'''simple docstring'''
demo_graph = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = set(start)
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
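    # Lightweight self-checks for the two helpers above (hedged illustration;
    # expected values match the comments on the two calls).
    assert bfs_shortest_path(demo_graph, "G", "D") == ["G", "C", "A", "B", "D"]
    assert bfs_shortest_path_distance(demo_graph, "G", "D") == 4
    assert bfs_shortest_path_distance(demo_graph, "A", "A") == 0  # start == target
    assert bfs_shortest_path_distance({}, "A", "B") == -1  # node missing from graph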
| 89 | 0 |
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
lowercase__ : int = logging.get_logger(__name__)
lowercase__ : Optional[int] = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = 'conditional_detr'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }
    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage4'])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get('model_type')
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("""1.11""")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('''pixel_values''', {0: '''batch''', 1: '''num_channels''', 2: '''height''', 3: '''width'''}),
                ('''pixel_mask''', {0: '''batch'''}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
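
# Hedged usage sketch: this file defines transformers' ConditionalDetrConfig.
# Instantiating it and reading the aliased attributes exercises the
# attribute_map above; exact defaults can vary across transformers versions.
#
#   from transformers import ConditionalDetrConfig
#   cfg = ConditionalDetrConfig(num_queries=100)
#   cfg.hidden_size          # aliased to cfg.d_model via attribute_map
#   cfg.num_attention_heads  # aliased to cfg.encoder_attention_heads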
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_swin'''] = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_swin'''] = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
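
# The _LazyModule pattern above defers heavy imports until first attribute
# access. A minimal standalone sketch of the same idea via module-level
# __getattr__ (PEP 562) — an illustration, not the actual _LazyModule code:
#
#   # mypkg/__init__.py
#   import importlib
#
#   _import_structure = {"modeling": ["BigModel"]}
#
#   def __getattr__(name):
#       for module_name, symbols in _import_structure.items():
#           if name in symbols:
#               module = importlib.import_module(f".{module_name}", __name__)
#               return getattr(module, name)
#       raise AttributeError(f"module {__name__!r} has no attribute {name!r}")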
| 89 | 0 |
"""simple docstring"""
import string
def decrypt(message: str) -> None:
    for key in range(len(string.ascii_uppercase)):
        translated = ""
        for symbol in message:
            if symbol in string.ascii_uppercase:
                num = string.ascii_uppercase.find(symbol)
                num = num - key
                if num < 0:
                    num = num + len(string.ascii_uppercase)
                translated = translated + string.ascii_uppercase[num]
            else:
                translated = translated + symbol
        print(f"""Decryption using Key #{key}: {translated}""")
def main() -> None:
    message = input("Encrypted message: ")
    message = message.upper()
    decrypt(message)
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 269 |
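# Worked example for the brute-force Caesar decrypt above: "KHOOR" is "HELLO"
# shifted by 3, so the key-3 line of the printout recovers the plaintext.
#
#   decrypt("KHOOR")
#   ...
#   Decryption using Key #3: HELLO
#   ...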
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 0 |
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00_01,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.float32,
    ):
        self.dtype = dtype
    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common,
            init_noise_sigma=init_noise_sigma,
            timesteps=timesteps,
        )
    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        return sample
    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps,
            timesteps=timesteps,
        )
    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance
    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"""prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` """
                " for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)
    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
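

# A compact numpy sketch of the reverse step implemented above (epsilon
# prediction with "fixed_small" variance), to make formulas (6), (7) and (15)
# of https://arxiv.org/pdf/2006.11239.pdf concrete. Illustration only — the
# helper name and argument layout are assumptions, not the scheduler's API.
import numpy as np


def ddpm_step_np(model_output, sample, t, alphas, alphas_cumprod, betas, rng):
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1] if t > 0 else 1.0
    beta_prod_t = 1 - alpha_prod_t
    # predicted x_0 from predicted noise (formula (15))
    pred_x0 = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    # posterior mean coefficients (formula (7))
    coef_x0 = (alpha_prod_t_prev**0.5 * betas[t]) / beta_prod_t
    coef_xt = alphas[t] ** 0.5 * (1 - alpha_prod_t_prev) / beta_prod_t
    mean = coef_x0 * pred_x0 + coef_xt * sample
    # "fixed_small" posterior variance (formula (6)), clipped like above
    variance = max((1 - alpha_prod_t_prev) / beta_prod_t * betas[t], 1e-20)
    noise = rng.standard_normal(sample.shape) if t > 0 else 0.0
    return mean + variance**0.5 * noise


# usage: rng = np.random.default_rng(0); x = ddpm_step_np(eps, x, t, ...)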
| 52 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
| 89 | 0 |
import os
def solution() -> int:
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
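

# Hedged, self-contained check of the same scanning logic on a small grid.
# The original hard-codes a 20x20 grid (hence the range(17) bounds); this
# hypothetical helper generalizes the bounds so it can be tested on 4x4 input.
def max_product_4(grid):
    n = len(grid)
    best = 0
    for i in range(n):
        for j in range(n):
            if j + 3 < n:  # right
                best = max(best, grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3])
            if i + 3 < n:  # down
                best = max(best, grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j])
            if i + 3 < n and j + 3 < n:  # diagonal down-right
                best = max(best, grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3])
            if i + 3 < n and j - 3 >= 0:  # diagonal down-left
                best = max(best, grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3])
    return best


assert max_product_4([[1, 2, 3, 4], [1, 1, 1, 1], [2, 2, 2, 2], [1, 3, 1, 3]]) == 24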
| 219 |
'''simple docstring'''
def triangle_number_generator():
    for n in range(1, 1000000):
        yield n * (n + 1) // 2


def count_divisors(n):
    divisors_count = 1
    i = 2
    while i * i <= n:
        multiplicity = 0
        while n % i == 0:
            n //= i
            multiplicity += 1
        divisors_count *= multiplicity + 1
        i += 1
    if n > 1:
        divisors_count *= 2
    return divisors_count


def solution():
    return next(i for i in triangle_number_generator() if count_divisors(i) > 500)
if __name__ == "__main__":
print(solution())
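    # Worked check: 28 = 2^2 * 7 has (2 + 1) * (1 + 1) = 6 divisors
    # {1, 2, 4, 7, 14, 28}; as the 7th triangular number it is the first with
    # more than five divisors — the classic warm-up example for this problem.
    assert count_divisors(28) == 6
    assert next(t for t in triangle_number_generator() if count_divisors(t) > 5) == 28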
| 89 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCamelCase: int = logging.get_logger(__name__)
_UpperCamelCase: List[str] = {'vocab_file': 'spiece.model'}
_UpperCamelCase: Dict = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
_UpperCamelCase: int = {'bert_for_seq_generation': 5_1_2}
class a__ ( _UpperCamelCase ):
_lowerCamelCase = VOCAB_FILES_NAMES
_lowerCamelCase = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase = []
_lowerCamelCase = ['input_ids', 'attention_mask']
def __init__( self : Union[str, Any], lowerCAmelCase : Any, lowerCAmelCase : Optional[int]="<s>", lowerCAmelCase : Optional[Any]="</s>", lowerCAmelCase : Optional[Any]="<unk>", lowerCAmelCase : Dict="<pad>", lowerCAmelCase : str="<::::>", lowerCAmelCase : Optional[Dict[str, Any]] = None, **lowerCAmelCase : Any, ) -> str:
lowercase : int = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
lowercase : Dict = vocab_file
lowercase : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_UpperCAmelCase )
@property
def lowercase ( self : Tuple ) -> Dict:
return self.sp_model.get_piece_size()
def lowercase ( self : str ) -> str:
lowercase : List[Any] = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) -> List[Any]:
lowercase : Tuple = self.__dict__.copy()
lowercase : str = None
return state
def __setstate__( self : Optional[Any], lowerCAmelCase : Optional[int] ) -> List[str]:
lowercase : Optional[int] = d
# for backward compatibility
if not hasattr(self, 'sp_model_kwargs' ):
lowercase : Dict = {}
lowercase : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def lowercase ( self : Any, lowerCAmelCase : str ) -> List[Any]:
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def lowercase ( self : Optional[int], lowerCAmelCase : Tuple ) -> List[Any]:
return self.sp_model.piece_to_id(_UpperCAmelCase )
def lowercase ( self : Optional[int], lowerCAmelCase : str ) -> Union[str, Any]:
lowercase : Union[str, Any] = self.sp_model.IdToPiece(_UpperCAmelCase )
return token
def lowercase ( self : str, lowerCAmelCase : Tuple ) -> str:
lowercase : Any = []
lowercase : Any = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_UpperCAmelCase ) + token
lowercase : Any = []
else:
current_sub_tokens.append(_UpperCAmelCase )
out_string += self.sp_model.decode(_UpperCAmelCase )
return out_string.strip()
def lowercase ( self : Any, lowerCAmelCase : str, lowerCAmelCase : Optional[str] = None ) -> Optional[Any]:
if not os.path.isdir(_UpperCAmelCase ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
lowercase : List[Any] = os.path.join(
_UpperCAmelCase, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, 'wb' ) as fi:
lowercase : Tuple = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
| 255 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
    import sqlite3
import sqlalchemy
class SqlDatasetReader(AbstractDatasetInputStream):
    def __init__(
        self,
        sql: Union[str, "sqlalchemy.sql.Selectable"],
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        **kwargs,
    ):
        super().__init__(features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, **kwargs)
        self.builder = Sql(
            cache_dir=cache_dir,
            features=features,
            sql=sql,
            con=con,
            **kwargs,
        )

    def read(self):
        download_config = None
        download_mode = None
        verification_mode = None
        base_path = None
        self.builder.download_and_prepare(
            download_config=download_config,
            download_mode=download_mode,
            verification_mode=verification_mode,
            base_path=base_path,
        )
        # Build dataset for splits
        dataset = self.builder.as_dataset(
            split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory
        )
        return dataset
class SqlDatasetWriter:
    def __init__(
        self,
        dataset: Dataset,
        name: str,
        con: Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"],
        batch_size: Optional[int] = None,
        num_proc: Optional[int] = None,
        **to_sql_kwargs,
    ):
        if num_proc is not None and num_proc <= 0:
            raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""")
        self.dataset = dataset
        self.name = name
        self.con = con
        self.batch_size = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
        self.num_proc = num_proc
        self.to_sql_kwargs = to_sql_kwargs

    def write(self) -> int:
        _ = self.to_sql_kwargs.pop('sql', None)
        _ = self.to_sql_kwargs.pop('con', None)
        index = self.to_sql_kwargs.pop('index', False)
        written = self._write(index=index, **self.to_sql_kwargs)
        return written

    def _batch_sql(self, args):
        offset, index, to_sql_kwargs = args
        to_sql_kwargs = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
        batch = query_table(
            table=self.dataset.data,
            key=slice(offset, offset + self.batch_size),
            indices=self.dataset._indices,
        )
        df = batch.to_pandas()
        num_rows = df.to_sql(self.name, self.con, index=index, **to_sql_kwargs)
        return num_rows or len(df)

    def _write(self, index, **to_sql_kwargs) -> int:
        written = 0
        if self.num_proc is None or self.num_proc == 1:
            for offset in logging.tqdm(
                range(0, len(self.dataset), self.batch_size),
                unit='ba',
                disable=not logging.is_progress_bar_enabled(),
                desc='Creating SQL from Arrow format',
            ):
                written += self._batch_sql((offset, index, to_sql_kwargs))
        else:
            num_rows, batch_size = len(self.dataset), self.batch_size
            with multiprocessing.Pool(self.num_proc) as pool:
                for num_rows in logging.tqdm(
                    pool.imap(
                        self._batch_sql,
                        [(offset, index, to_sql_kwargs) for offset in range(0, num_rows, batch_size)],
                    ),
                    total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size,
                    unit='ba',
                    disable=not logging.is_progress_bar_enabled(),
                    desc='Creating SQL from Arrow format',
                ):
                    written += num_rows
        return written
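

# Hedged end-to-end sketch of what these classes back: Dataset.to_sql and
# Dataset.from_sql are the public datasets entry points that delegate to the
# writer/reader above. The sqlite URI and table name are hypothetical.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"text": ["a", "b"], "label": [0, 1]})
#   ds.to_sql("my_table", "sqlite:///my.db")               # SqlDatasetWriter
#   ds2 = Dataset.from_sql("my_table", "sqlite:///my.db")  # SqlDatasetReader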
| 89 | 0 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_lxmert import LxmertTokenizer
a : List[Any] = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
a : List[str] = {
"vocab_file": {
"unc-nlp/lxmert-base-uncased": "https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/vocab.txt",
},
"tokenizer_file": {
"unc-nlp/lxmert-base-uncased": (
"https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/tokenizer.json"
),
},
}
a : List[Any] = {
"unc-nlp/lxmert-base-uncased": 5_12,
}
a : Tuple = {
"unc-nlp/lxmert-base-uncased": {"do_lower_case": True},
}
class UpperCamelCase__ ( _UpperCamelCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE__ : List[Any] = PRETRAINED_INIT_CONFIGURATION
SCREAMING_SNAKE_CASE__ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
SCREAMING_SNAKE_CASE__ : Any = LxmertTokenizer
def __init__( self , snake_case=None , snake_case=None , snake_case=True , snake_case="[UNK]" , snake_case="[SEP]" , snake_case="[PAD]" , snake_case="[CLS]" , snake_case="[MASK]" , snake_case=True , snake_case=None , **snake_case , ):
'''simple docstring'''
super().__init__(
_UpperCAmelCase , tokenizer_file=_UpperCAmelCase , do_lower_case=_UpperCAmelCase , unk_token=_UpperCAmelCase , sep_token=_UpperCAmelCase , pad_token=_UpperCAmelCase , cls_token=_UpperCAmelCase , mask_token=_UpperCAmelCase , tokenize_chinese_chars=_UpperCAmelCase , strip_accents=_UpperCAmelCase , **_UpperCAmelCase , )
UpperCAmelCase : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , _UpperCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , _UpperCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , _UpperCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase : Optional[Any] = getattr(_UpperCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase : Tuple = do_lower_case
UpperCAmelCase : Union[str, Any] = strip_accents
UpperCAmelCase : Union[str, Any] = tokenize_chinese_chars
UpperCAmelCase : Dict = normalizer_class(**_UpperCAmelCase )
UpperCAmelCase : Tuple = do_lower_case
def A_ ( self , snake_case , snake_case=None ):
'''simple docstring'''
UpperCAmelCase : List[Any] = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : List[str] = [self.sep_token_id]
UpperCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A_ ( self , snake_case , snake_case = None ):
'''simple docstring'''
UpperCAmelCase : List[Any] = self._tokenizer.model.save(_UpperCAmelCase , name=_UpperCAmelCase )
return tuple(_UpperCAmelCase )
| 311 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pt1: np.ndarray, pt2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    matrix = cv2.getAffineTransform(pt1, pt2)
    return cv2.warpAffine(img, matrix, (rows, cols))
if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
        plt.title(titles[i])
        plt.axis("off")
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
| 89 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_focalnet': ['FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'FocalNetConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_focalnet'] = [
'FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST',
'FocalNetForImageClassification',
'FocalNetForMaskedImageModeling',
'FocalNetBackbone',
'FocalNetModel',
'FocalNetPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_focalnet import FOCALNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FocalNetConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_focalnet import (
FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
FocalNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 152 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
TOLERANCE = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class __snake_case :
def __init__( self , __UpperCamelCase , __UpperCamelCase=16 , __UpperCamelCase=13 , __UpperCamelCase=7 , __UpperCamelCase=14 , __UpperCamelCase=10 , __UpperCamelCase=19 , __UpperCamelCase=5 , __UpperCamelCase=4 , __UpperCamelCase=True , __UpperCamelCase=16 , __UpperCamelCase=2 , __UpperCamelCase=4 , __UpperCamelCase=4 , __UpperCamelCase="gelu" , __UpperCamelCase=0.1 , __UpperCamelCase=0.1 , __UpperCamelCase=[1, 2, 3, 4, 5] , __UpperCamelCase=25 , __UpperCamelCase=5 , ) -> int:
'''simple docstring'''
snake_case__ : int = d_model
snake_case__ : Optional[Any] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : str = prediction_length
snake_case__ : Any = context_length
snake_case__ : Dict = cardinality
snake_case__ : List[str] = num_time_features
snake_case__ : Tuple = lags_sequence
snake_case__ : Any = embedding_dimension
snake_case__ : Optional[int] = is_training
snake_case__ : str = hidden_size
snake_case__ : int = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Any = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : Any = context_length
snake_case__ : Union[str, Any] = prediction_length + label_length
snake_case__ : Optional[int] = label_length
snake_case__ : Optional[int] = moving_average
snake_case__ : Union[str, Any] = autocorrelation_factor
def __a ( self ) -> int:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def __a ( self , __UpperCamelCase ) -> Union[str, Any]:
'''simple docstring'''
snake_case__ : str = config.context_length + max(config.lags_sequence )
snake_case__ : Dict = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
snake_case__ : Any = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
snake_case__ : Optional[int] = floats_tensor([self.batch_size, _past_length] )
snake_case__ : str = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
snake_case__ : str = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
snake_case__ : List[str] = floats_tensor([self.batch_size, config.prediction_length] )
snake_case__ : List[str] = {
'past_values': past_values,
'static_categorical_features': static_categorical_features,
'past_time_features': past_time_features,
'past_observed_mask': past_observed_mask,
'future_time_features': future_time_features,
'future_values': future_values,
}
return inputs_dict
def __a ( self ) -> int:
'''simple docstring'''
snake_case__ : str = self.get_config()
snake_case__ : Optional[Any] = self.prepare_autoformer_inputs_dict(_UpperCAmelCase )
return config, inputs_dict
def __a ( self ) -> Dict:
'''simple docstring'''
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
return config, inputs_dict
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ) -> List[Any]:
        '''simple docstring'''
        model = AutoformerModel(config=config ).to(torch_device ).eval()  # torch_device from transformers.testing_utils is assumed to be imported above
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = AutoformerEncoder.from_pretrained(tmpdirname ).to(torch_device )
        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict )
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
        encoder_last_hidden_state_a = encoder(inputs_embeds=enc_input )[0]
        self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1E-3 )
        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
            .unsqueeze(1 )
            .repeat(1 , config.prediction_length , 1 )
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
                feature[:, config.context_length - config.label_length :, ...],
            ) , dim=-1 , )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = AutoformerDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_a = decoder(
            trend=trend_init , inputs_embeds=dec_input , encoder_hidden_states=encoder_last_hidden_state , )[0]
        self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1E-3 )
@require_torch
class AutoformerModelTest( ModelTesterMixin ,PipelineTesterMixin ,unittest.TestCase ):
    # mixin base classes and the six boolean test flags below are reconstructed from the upstream test file
    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False
    def setUp( self ) -> List[Any]:
        '''simple docstring'''
        self.model_tester = AutoformerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=AutoformerConfig , has_text_modality=False )
    def test_config( self ) -> List[Any]:
        '''simple docstring'''
        self.config_tester.run_common_tests()
    def test_save_load_strict( self ) -> Optional[int]:
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                _, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info['missing_keys'] , [] )
    def test_encoder_decoder_model_standalone( self ) -> List[Any]:
        '''simple docstring'''
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    @unittest.skip(reason='Model has no tokens embeddings' )
    def test_resize_tokens_embeddings( self ) -> Tuple:
        '''simple docstring'''
        pass
    def test_model_main_input_name( self ) -> int:
        '''simple docstring'''
        model_signature = inspect.signature(getattr(AutoformerModel , 'forward' ) )
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys() )[1]
        self.assertEqual(AutoformerModel.main_input_name , observed_main_input_name )
    def test_forward_signature( self ) -> Optional[int]:
        '''simple docstring'''
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = [
                'past_values',
                'past_time_features',
                'past_observed_mask',
                'static_categorical_features',
                'static_real_features',
                'future_values',
                'future_time_features',
            ]
            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('future_observed_mask' )
            expected_arg_names.extend(
                [
                    'decoder_attention_mask',
                    'head_mask',
                    'decoder_head_mask',
                    'cross_attn_head_mask',
                    'encoder_outputs',
                    'past_key_values',
                    'output_hidden_states',
                    'output_attentions',
                    'use_cache',
                    'return_dict',
                ] )
            self.assertListEqual(arg_names[: len(expected_arg_names )] , expected_arg_names )
    def test_attention_outputs( self ) -> str:
        '''simple docstring'''
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        seq_length = getattr(self.model_tester , 'seq_length' , None )
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , seq_length )
        d_model = getattr(self.model_tester , 'd_model' , None )
        num_attention_heads = getattr(self.model_tester , 'num_attention_heads' , None )
        dim = d_model // num_attention_heads
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
            out_len = len(outputs )
            correct_outlen = 7
            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1
            self.assertEqual(out_len , correct_outlen )
            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions , (list, tuple) )
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions , (list, tuple) )
            self.assertEqual(len(cross_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + 2 , len(outputs ) )
            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(self_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
    @is_flaky()
    def test_retain_grad_hidden_states_attentions( self ) -> Any:
        '''simple docstring'''
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch( filename="train-batch.pt" ) -> List[Any]:
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch' , filename=filename , repo_type='dataset' )
    batch = torch.load(file , map_location=torch_device )  # torch_device is assumed to be imported with the test utilities above
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests( unittest.TestCase ):
    def test_inference_no_head( self ) -> Any:
        '''simple docstring'''
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch()
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , future_values=batch['future_values'] , future_time_features=batch['future_time_features'] , )[0]
        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1E-4 ) )  # tolerance assumed; the original defines a module-level TOLERANCE constant
    def test_inference_head( self ) -> Union[str, Any]:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'] , past_time_features=batch['past_time_features'] , past_observed_mask=batch['past_observed_mask'] , static_categorical_features=batch['static_categorical_features'] , ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]] , device=torch_device )
        self.assertTrue(torch.allclose(output[0, :3, :3] , expected_slice , atol=1E-4 ) )
    def test_seq_to_seq_generation( self ) -> Union[str, Any]:
        '''simple docstring'''
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly' ).to(torch_device )
        batch = prepare_batch('val-batch.pt' )
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'] , past_time_features=batch['past_time_features'] , past_values=batch['past_values'] , future_time_features=batch['future_time_features'] , past_observed_mask=batch['past_observed_mask'] , )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
        self.assertEqual(outputs.sequences.shape , expected_shape )
        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786] , device=torch_device )
        mean_prediction = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , expected_slice , rtol=1E-1 ) )
| 143 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save
def save_len_file( tokenizer_name , data_dir , max_source_length=1024 , max_target_length=1024 , consider_target=False , **kwargs ) -> List[Any]:
    tok = AutoTokenizer.from_pretrained(tokenizer_name )
    train_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='train' , **kwargs )
    pad = tok.pad_token_id
    def get_lens(ds ):
        dl = tqdm(
            DataLoader(ds , batch_size=512 , num_workers=8 , shuffle=False , collate_fn=ds.collate_fn ) , desc=str(ds.len_file ) , )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad ).sum(1 ).tolist()
            tgt_lens = batch['labels'].ne(pad ).sum(1 ).tolist()
            if consider_target:
                for src, tgt in zip(src_lens , tgt_lens ):
                    max_lens.append(max(src , tgt ) )
            else:
                max_lens.extend(src_lens )
        return max_lens
    train_lens = get_lens(train_ds )
    val_ds = Seq2SeqDataset(tok , data_dir , max_source_length , max_target_length , type_path='val' , **kwargs )
    val_lens = get_lens(val_ds )
    pickle_save(train_lens , train_ds.len_file )
    pickle_save(val_lens , val_ds.len_file )
if __name__ == "__main__":
fire.Fire(save_len_file)
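# Illustrative invocation via `fire`, which maps the function's keyword arguments to
# CLI flags (tokenizer and data paths below are placeholders, not from the original):
#   python save_len_file.py --tokenizer_name t5-small --data_dir ./data_dir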
| 89 | 0 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
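# The two id lists below are the non-speech token tables for the English-only and
# multilingual Whisper vocabularies; suppressing them during generation keeps the
# model emitting only transcription tokens (kept verbatim from the source).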
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 357, 366, 438, 532, 685,
705, 796, 930, 1058, 1220, 1267, 1279, 1303, 1343, 1377,
1391, 1635, 1782, 1875, 2162, 2361, 2488, 3467, 4008, 4211,
4600, 4808, 5299, 5855, 6329, 7203, 9609, 9959, 10563, 10786,
11420, 11709, 11907, 13163, 13697, 13700, 14808, 15306, 16410, 16791,
17992, 19203, 19510, 20724, 22305, 22935, 27007, 30109, 30420, 33409,
34949, 40283, 40493, 40549, 47282, 49146, 50257, 50359, 50360, 50361
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 10, 14, 25,
26, 27, 28, 29, 31, 58, 59, 60, 61, 62,
63, 90, 91, 92, 93, 359, 503, 522, 542, 873,
893, 902, 918, 922, 931, 1350, 1853, 1982, 2460, 2627,
3246, 3253, 3268, 3536, 3846, 3961, 4183, 4667, 6585, 6647,
7273, 9061, 9383, 10428, 10929, 11938, 12033, 12331, 12562, 13793,
14157, 14635, 15265, 15618, 16553, 16604, 18362, 18956, 20075, 21675,
22520, 26130, 26161, 26435, 28279, 29464, 31650, 32302, 32470, 36865,
42863, 47425, 49870, 50254, 50258, 50360, 50361, 50362
]
# fmt: on
class WhisperConfig( PretrainedConfig ):
    """simple docstring"""
    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_1865 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=1536 , encoder_ffn_dim=1536 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=5_0257 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=256 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.02 , scale_embedding=False , max_source_positions=1500 , max_target_positions=448 , pad_token_id=5_0256 , bos_token_id=5_0256 , eos_token_id=5_0256 , suppress_tokens=None , begin_suppress_tokens=[220, 5_0256] , use_weighted_layer_sum=False , classifier_proj_size=256 , apply_spec_augment=False , mask_time_prob=0.05 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ):
        """simple docstring"""
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
class WhisperOnnxConfig( OnnxSeq2SeqConfigWithPast ):
    """simple docstring"""
    @property
    def inputs( self ):
        """simple docstring"""
        common_inputs = OrderedDict(
            [
                ('input_features', {0: 'batch', 1: 'feature_size', 2: 'encoder_sequence'}),
            ] )
        if self.use_past:
            common_inputs['decoder_input_ids'] = {0: 'batch'}
        else:
            common_inputs['decoder_input_ids'] = {0: 'batch', 1: 'decoder_sequence'}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction='inputs' )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor , batch_size = -1 , seq_length = -1 , is_pair = False , framework = None , sampling_rate = 2_2050 , time_duration = 5.0 , frequency = 220 , ):
        """simple docstring"""
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs['input_features'].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs['input_features'] = encoder_inputs.pop('input_features' )
        dummy_inputs['decoder_input_ids'] = decoder_inputs.pop('decoder_input_ids' )
        if "past_key_values" in decoder_inputs:
            dummy_inputs['past_key_values'] = decoder_inputs.pop('past_key_values' )
        return dummy_inputs
    @property
    def atol_for_validation( self ):
        """simple docstring"""
        return 1e-3
| 39 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__( self : List[Any] ,data : Any ):
        self.data = data
        self.next = None
    def __repr__( self : Any ):
        return F"""Node({self.data})"""
class LinkedList:
    def __init__( self : int ):
        self.head = None
    def __iter__( self : str ):
        node = self.head
        while node:
            yield node.data
            node = node.next
    def __len__( self : Optional[Any] ):
        return sum(1 for _ in self )
    def __repr__( self : str ):
        return "->".join([str(item ) for item in self] )
    def __getitem__( self : Tuple ,index : int ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        for i, node in enumerate(self ):
            if i == index:
                return node
        return None
    def __setitem__( self : Union[str, Any] ,index : int ,data : Any ):
        if not 0 <= index < len(self ):
            raise ValueError('list index out of range.' )
        current = self.head
        for _ in range(index ):
            current = current.next
        current.data = data
    def insert_tail( self : Optional[int] ,data : Any ):
        self.insert_nth(len(self ) ,data )
    def insert_head( self : Union[str, Any] ,data : Any ):
        self.insert_nth(0 ,data )
    def insert_nth( self : str ,index : int ,data : Any ):
        if not 0 <= index <= len(self ):
            raise IndexError('list index out of range' )
        new_node = Node(data )
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
    def print_list( self : Optional[int] ):  # print every node data
        print(self )
    def delete_head( self : str ):
        return self.delete_nth(0 )
    def delete_tail( self : str ):  # delete from tail
        return self.delete_nth(len(self ) - 1 )
    def delete_nth( self : List[str] ,index : int = 0 ):
        if not 0 <= index <= len(self ) - 1:  # test if index is valid
            raise IndexError('List index out of range.' )
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data
    def is_empty( self : List[Any] ):
        return self.head is None
    def reverse( self : Tuple ):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev
def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list ) == ""
    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.
    for i in range(10 ):
        assert len(linked_list ) == i
        linked_list.insert_nth(i , i + 1 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 11 ) )
    linked_list.insert_head(0 )
    linked_list.insert_tail(11 )
    assert str(linked_list ) == "->".join(str(i ) for i in range(0 , 12 ) )
    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9 ) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list ) == 9
    assert str(linked_list ) == "->".join(str(i ) for i in range(1 , 10 ) )
    assert all(linked_list[i] == i + 1 for i in range(0 , 9 ) ) is True
    for i in range(0 , 9 ):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0 , 9 ) ) is True
    linked_list.reverse()
    assert str(linked_list ) == "->".join(str(i ) for i in range(-8 , 1 ) )
def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112 ),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10 ),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()
    for i in test_input:
        linked_list.insert_tail(i )
    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list ) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )
    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )
    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10 )
    assert result is None
    assert (
        str(linked_list ) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )
    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!' ) )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )
    # Add None to its tail
    linked_list.insert_tail(None )
    assert (
        str(linked_list )
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )
    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list )
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )
def main() -> None:
    from doctest import testmod
    testmod()
    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ' ).strip() )
    linked_list.insert_head(input('Inserting 2nd at head ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ' ).strip() )
    linked_list.insert_tail(input('Inserting 2nd at tail ' ).strip() )
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nDelete head' )
    linked_list.delete_head()
    print('Delete tail' )
    linked_list.delete_tail()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nReverse linked list' )
    linked_list.reverse()
    print('\nPrint list:' )
    linked_list.print_list()
    print('\nString representation of linked list:' )
    print(linked_list )
    print('\nReading/changing Node data using indexing:' )
    print(f"""Element at Position 1: {linked_list[1]}""" )
    linked_list[1] = input('Enter New Value: ' ).strip()
    print('New list:' )
    print(linked_list )
    print(f"""length of linked_list is : {len(linked_list )}""" )
if __name__ == "__main__":
main()
| 89 | 0 |
def solution(n: int = 1000 ) -> int:
    """Return the index of the first Fibonacci number with `n` digits.

    e.g. solution(3) == 12, since F(12) = 144 is the first 3-digit term.
    """
    fa, fb = 1, 1
    index = 2
    while True:
        i = 0
        f = fa + fb
        fa, fb = fb, f
        index += 1
        for _ in str(f ):
            i += 1
        if i == n:
            break
    return index
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
| 274 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
logger = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class TestCodeExamples( unittest.TestCase ):
    def analyze_directory( self : str ,directory : Path ,identifier : Union[str, None] = None ,ignore_files : Union[List[str], None] = None ,n_identifier : Union[str, List[str], None] = None ,only_modules : bool = True ,):
        files = [file for file in os.listdir(directory ) if os.path.isfile(os.path.join(directory ,file ) )]
        if identifier is not None:
            files = [file for file in files if identifier in file]
        if n_identifier is not None:
            if isinstance(n_identifier ,list ):
                for n_ in n_identifier:
                    files = [file for file in files if n_ not in file]
            else:
                files = [file for file in files if n_identifier not in file]
        ignore_files = ignore_files or []
        ignore_files.append('__init__.py' )
        files = [file for file in files if file not in ignore_files]
        for file in files:
            # Open all files
            print('Testing' ,file )
            if only_modules:
                module_identifier = file.split('.' )[0]
                try:
                    module_identifier = getattr(transformers ,module_identifier )
                    suite = doctest.DocTestSuite(module_identifier )
                    result = unittest.TextTestRunner().run(suite )
                    self.assertIs(len(result.failures ) ,0 )
                except AttributeError:
                    logger.info(F"""{module_identifier} is not a module.""" )
            else:
                result = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
                self.assertIs(result.failed ,0 )
    def test_modeling_files( self : Union[str, Any] ):  # test method names in this class are reconstructed; the originals were mangled
        directory = Path('src/transformers' )
        identifier = 'modeling'
        ignore_files = [
            'modeling_ctrl.py',
            'modeling_tf_ctrl.py',
        ]
        self.analyze_directory(directory ,identifier=identifier ,ignore_files=ignore_files )
    def test_tokenization_files( self : int ):
        directory = Path('src/transformers' )
        identifier = 'tokenization'
        self.analyze_directory(directory ,identifier=identifier )
    def test_configuration_files( self : int ):
        directory = Path('src/transformers' )
        identifier = 'configuration'
        self.analyze_directory(directory ,identifier=identifier )
    def test_remaining_files( self : Dict ):
        directory = Path('src/transformers' )
        n_identifier = ['configuration', 'modeling', 'tokenization']
        self.analyze_directory(directory ,n_identifier=n_identifier )
    def test_doc_files( self : Optional[Any] ):
        directory = Path('docs/source' )
        ignore_files = ['favicon.ico']
        self.analyze_directory(directory ,ignore_files=ignore_files ,only_modules=False )
| 89 | 0 |
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch( tf_checkpoint_path : str , albert_config_file : str , pytorch_dump_path : str ):
    """simple docstring"""
    config = AlbertConfig.from_json_file(albert_config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = AlbertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_albert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
__lowerCamelCase : int = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
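# Example run (all paths below are placeholders):
#   python convert_albert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./albert_base/model.ckpt-best \
#       --albert_config_file ./albert_base/albert_config.json \
#       --pytorch_dump_path ./albert_base/pytorch_model.bin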
| 18 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)
IMAGE_PROCESSOR_MAPPING_NAMES = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
IMAGE_PROCESSOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name( class_name ) -> Optional[Any]:
    for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name )
            module = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
            try:
                return getattr(module , class_name )
            except AttributeError:
                continue
    for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
        if getattr(extractor , '__name__' , None ) == class_name:
            return extractor
    # We did not fine the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module('transformers' )
    if hasattr(main_module , class_name ):
        return getattr(main_module , class_name )
    return None
def get_image_processor_config( pretrained_model_name_or_path , cache_dir = None , force_download = False , resume_download = False , proxies = None , use_auth_token = None , revision = None , local_files_only = False , **kwargs , ) -> Tuple:
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path , IMAGE_PROCESSOR_NAME , cache_dir=cache_dir , force_download=force_download , resume_download=resume_download , proxies=proxies , use_auth_token=use_auth_token , revision=revision , local_files_only=local_files_only , )
    if resolved_config_file is None:
        logger.info(
            'Could not locate the image processor configuration file, will try to use the model config instead.' )
        return {}
    with open(resolved_config_file , encoding='utf-8' ) as reader:
        return json.load(reader )
class AutoImageProcessor:
    def __init__( self : List[str] ):
        raise EnvironmentError(
            'AutoImageProcessor is designed to be instantiated '
            'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
    @classmethod
    @replace_list_option_in_docstrings(IMAGE_PROCESSOR_MAPPING_NAMES )
    def from_pretrained( cls ,pretrained_model_name_or_path ,**kwargs ):
        config = kwargs.pop('config' ,None )
        trust_remote_code = kwargs.pop('trust_remote_code' ,None )
        kwargs['_from_auto'] = True
        config_dict , _ = ImageProcessingMixin.get_image_processor_dict(pretrained_model_name_or_path ,**kwargs )
        image_processor_class = config_dict.get('image_processor_type' ,None )
        image_processor_auto_map = None
        if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
            image_processor_auto_map = config_dict['auto_map']['AutoImageProcessor']
        # If we still don't have the image processor class, check if we're loading from a previous feature extractor config
        # and if so, infer the image processor class from there.
        if image_processor_class is None and image_processor_auto_map is None:
            feature_extractor_class = config_dict.pop('feature_extractor_type' ,None )
            if feature_extractor_class is not None:
                logger.warning(
                    'Could not find image processor class in the image processor config or the model config. Loading'
                    ' based on pattern matching with the model\'s feature extractor configuration.' )
                image_processor_class = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
            if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
                feature_extractor_auto_map = config_dict['auto_map']['AutoFeatureExtractor']
                image_processor_auto_map = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
                logger.warning(
                    'Could not find image processor auto map in the image processor config or the model config.'
                    ' Loading based on pattern matching with the model\'s feature extractor configuration.' )
        # If we don't find the image processor class in the image processor config, let's try the model config.
        if image_processor_class is None and image_processor_auto_map is None:
            if not isinstance(config ,PretrainedConfig ):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path ,**kwargs )
            # It could be in `config.image_processor_type``
            image_processor_class = getattr(config ,'image_processor_type' ,None )
            if hasattr(config ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
                image_processor_auto_map = config.auto_map['AutoImageProcessor']
        if image_processor_class is not None:
            image_processor_class = image_processor_class_from_name(image_processor_class )
        has_remote_code = image_processor_auto_map is not None
        has_local_code = image_processor_class is not None or type(config ) in IMAGE_PROCESSOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code ,pretrained_model_name_or_path ,has_local_code ,has_remote_code )
        if has_remote_code and trust_remote_code:
            image_processor_class = get_class_from_dynamic_module(
                image_processor_auto_map ,pretrained_model_name_or_path ,**kwargs )
            _ = kwargs.pop('code_revision' ,None )
            if os.path.isdir(pretrained_model_name_or_path ):
                image_processor_class.register_for_auto_class()
            return image_processor_class.from_dict(config_dict ,**kwargs )
        elif image_processor_class is not None:
            return image_processor_class.from_dict(config_dict ,**kwargs )
        # Last try: we use the IMAGE_PROCESSOR_MAPPING.
        elif type(config ) in IMAGE_PROCESSOR_MAPPING:
            image_processor_class = IMAGE_PROCESSOR_MAPPING[type(config )]
            return image_processor_class.from_dict(config_dict ,**kwargs )
        raise ValueError(
            F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
            F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
            F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
    @staticmethod
    def register( config_class ,image_processor_class ):
        IMAGE_PROCESSOR_MAPPING.register(config_class ,image_processor_class )
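# Minimal usage sketch (the checkpoint name is only an example, not part of this module):
#   image_processor = AutoImageProcessor.from_pretrained('google/vit-base-patch16-224')
#   inputs = image_processor(images=image, return_tensors='pt')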
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase__ : Dict = {
'''configuration_convbert''': ['''CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''ConvBertConfig''', '''ConvBertOnnxConfig'''],
'''tokenization_convbert''': ['''ConvBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Optional[Any] = ['''ConvBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : List[Any] = [
'''CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''ConvBertForMaskedLM''',
'''ConvBertForMultipleChoice''',
'''ConvBertForQuestionAnswering''',
'''ConvBertForSequenceClassification''',
'''ConvBertForTokenClassification''',
'''ConvBertLayer''',
'''ConvBertModel''',
'''ConvBertPreTrainedModel''',
'''load_tf_weights_in_convbert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase__ : Tuple = [
'''TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFConvBertForMaskedLM''',
'''TFConvBertForMultipleChoice''',
'''TFConvBertForQuestionAnswering''',
'''TFConvBertForSequenceClassification''',
'''TFConvBertForTokenClassification''',
'''TFConvBertLayer''',
'''TFConvBertModel''',
'''TFConvBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_convbert import CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvBertConfig, ConvBertOnnxConfig
from .tokenization_convbert import ConvBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_convbert_fast import ConvBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convbert import (
CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvBertForMaskedLM,
ConvBertForMultipleChoice,
ConvBertForQuestionAnswering,
ConvBertForSequenceClassification,
ConvBertForTokenClassification,
ConvBertLayer,
ConvBertModel,
ConvBertPreTrainedModel,
load_tf_weights_in_convbert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convbert import (
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertLayer,
TFConvBertModel,
TFConvBertPreTrainedModel,
)
else:
import sys
lowercase__ : Union[str, Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
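# Note: with this `_LazyModule` pattern, `from transformers import ConvBertModel` defers
# importing the heavy torch/TF modeling code until the attribute is first accessed.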
| 338 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
_IMAGE_COMPRESSION_FORMATS = None
_NATIVE_BYTEORDER = '<' if sys.byteorder == 'little' else '>'
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
_VALID_IMAGE_ARRAY_DTPYES = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class Image:
    decode: bool = True
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "PIL.Image.Image"
    pa_type: ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
    _type: str = field(default='Image' , init=False , repr=False )
def __call__( self : Union[str, Any] ):
return self.pa_type
    def encode_example( self : Any ,value : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support encoding images, please install \'Pillow\'.' )
        if isinstance(value ,list ):
            value = np.array(value )
        if isinstance(value ,str ):
            return {"path": value, "bytes": None}
        elif isinstance(value ,bytes ):
            return {"path": None, "bytes": value}
        elif isinstance(value ,np.ndarray ):
            # convert the image array to PNG/TIFF bytes
            return encode_np_array(value )
        elif isinstance(value ,PIL.Image.Image ):
            # convert the PIL image to bytes (default format is PNG/TIFF)
            return encode_pil_image(value )
        elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
            # we set "bytes": None to not duplicate the data if they're already available locally
            return {"bytes": None, "path": value.get('path' )}
        elif value.get('bytes' ) is not None or value.get('path' ) is not None:
            # store the image bytes, and path is used to infer the image format using the file extension
            return {"bytes": value.get('bytes' ), "path": value.get('path' )}
        else:
            raise ValueError(
                F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
    def decode_example( self : Optional[Any] ,value : dict ,token_per_repo_id : Optional[int]=None ):
        if not self.decode:
            raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
        if config.PIL_AVAILABLE:
            import PIL.Image
        else:
            raise ImportError('To support decoding images, please install \'Pillow\'.' )
        if token_per_repo_id is None:
            token_per_repo_id = {}
        path, bytes_ = value['path'], value['bytes']
        if bytes_ is None:
            if path is None:
                raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
            else:
                if is_local_path(path ):
                    image = PIL.Image.open(path )
                else:
                    source_url = path.split('::' )[-1]
                    try:
                        repo_id = string_to_dict(source_url ,config.HUB_DATASETS_URL )['repo_id']
                        use_auth_token = token_per_repo_id.get(repo_id )
                    except ValueError:
                        use_auth_token = None
                    with xopen(path ,'rb' ,use_auth_token=use_auth_token ) as f:
                        bytes_ = BytesIO(f.read() )
                    image = PIL.Image.open(bytes_ )
        else:
            image = PIL.Image.open(BytesIO(bytes_ ) )
        image.load()  # to avoid "Too many open files" errors
        return image
    def flatten( self : int ):
        from .features import Value
        return (
            self
            if self.decode
            else {
                "bytes": Value('binary' ),
                "path": Value('string' ),
            }
        )
    def cast_storage( self : str ,storage : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
        if pa.types.is_string(storage.type ):
            bytes_array = pa.array([None] * len(storage ) ,type=pa.binary() )
            storage = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_binary(storage.type ):
            path_array = pa.array([None] * len(storage ) ,type=pa.string() )
            storage = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_struct(storage.type ):
            if storage.type.get_field_index('bytes' ) >= 0:
                bytes_array = storage.field('bytes' )
            else:
                bytes_array = pa.array([None] * len(storage ) ,type=pa.binary() )
            if storage.type.get_field_index('path' ) >= 0:
                path_array = storage.field('path' )
            else:
                path_array = pa.array([None] * len(storage ) ,type=pa.string() )
            storage = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
        elif pa.types.is_list(storage.type ):
            bytes_array = pa.array(
                [encode_np_array(np.array(arr ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
            path_array = pa.array([None] * len(storage ) ,type=pa.string() )
            storage = pa.StructArray.from_arrays(
                [bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
        return array_cast(storage ,self.pa_type )
    def embed_storage( self : Dict ,storage : pa.StructArray ):
        @no_op_if_value_is_null
        def path_to_bytes(path : Tuple ):
            with xopen(path ,'rb' ) as f:
                bytes_ = f.read()
            return bytes_
        bytes_array = pa.array(
            [
                (path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
                for x in storage.to_pylist()
            ] ,type=pa.binary() ,)
        path_array = pa.array(
            [os.path.basename(path ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
        storage = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
        return array_cast(storage ,self.pa_type )
def list_image_compression_formats() -> List[str]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    global _IMAGE_COMPRESSION_FORMATS
    if _IMAGE_COMPRESSION_FORMATS is None:
        PIL.Image.init()
        _IMAGE_COMPRESSION_FORMATS = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
    return _IMAGE_COMPRESSION_FORMATS
def image_to_bytes(image: "PIL.Image.Image" ) -> bytes:
    buffer = BytesIO()
    if image.format in list_image_compression_formats():
        format = image.format
    else:
        format = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
    image.save(buffer , format=format )
    return buffer.getvalue()
def encode_pil_image(image: "PIL.Image.Image" ) -> dict:
    if hasattr(image , 'filename' ) and image.filename != "":
        return {"path": image.filename, "bytes": None}
    else:
        return {"path": None, "bytes": image_to_bytes(image )}
def encode_np_array(array: np.ndarray ) -> dict:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    dtype = array.dtype
    dtype_byteorder = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
    dtype_kind = dtype.kind
    dtype_itemsize = dtype.itemsize
    dest_dtype = None
    # Multi-channel array case (only np.dtype("|u1") is allowed)
    if array.shape[2:]:
        dest_dtype = np.dtype('|u1' )
        if dtype_kind not in ["u", "i"]:
            raise TypeError(
                f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
        if dtype is not dest_dtype:
            warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
    # Exact match
    elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
        dest_dtype = dtype
    else:  # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
        while dtype_itemsize >= 1:
            dest_dtype_str = dtype_byteorder + dtype_kind + str(dtype_itemsize )
            dest_dtype = np.dtype(dest_dtype_str )
            if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
                warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
                break
            else:
                dtype_itemsize //= 2
        if dest_dtype is None:
            raise TypeError(
                f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
    image = PIL.Image.fromarray(array.astype(dest_dtype ) )
    return {"path": None, "bytes": image_to_bytes(image )}
def objects_to_list_of_image_dicts(objs ) -> List[dict]:
    if config.PIL_AVAILABLE:
        import PIL.Image
    else:
        raise ImportError('To support encoding images, please install \'Pillow\'.' )
    if objs:
        _, obj = first_non_null_value(objs )
        if isinstance(obj , str ):
            return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
        if isinstance(obj , np.ndarray ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_np_array )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        elif isinstance(obj , PIL.Image.Image ):
            obj_to_image_dict_func = no_op_if_value_is_null(encode_pil_image )
            return [obj_to_image_dict_func(obj ) for obj in objs]
        else:
            return objs
    else:
        return objs
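# Typical use (sketch): declaring `datasets.Features({"img": Image()})` stores examples
# as {"bytes", "path"} structs on disk and decodes them back to PIL images on access.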
| 89 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
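# Quine-McCluskey two-level minimization: merge implicants that differ in one bit,
# keep the implicants that never merged (the prime implicants), then pick essential
# ones from the coverage chart built against the original minterms.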
def compare_string(string1: str ,string2: str ) -> str | Literal[False]:
    list1 = list(string1 )
    list2 = list(string2 )
    count = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = '_'
    if count > 1:
        return False
    else:
        return "".join(list1 )
def check(binary: list[str] ) -> list[str]:
    pi = []
    while True:
        check1 = ['$'] * len(binary )
        temp = []
        for i in range(len(binary ) ):
            for j in range(i + 1 ,len(binary ) ):
                k = compare_string(binary[i] ,binary[j] )
                if k is False:
                    check1[i] = '*'
                    check1[j] = '*'
                    temp.append("X" )
        for i in range(len(binary ) ):
            if check1[i] == "$":
                pi.append(binary[i] )
        if len(temp ) == 0:
            return pi
        binary = list(set(temp ) )
def decimal_to_binary(no_of_variable: int ,minterms ) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ''
        for _ in range(no_of_variable ):
            string = str(minterm % 2 ) + string
            minterm //= 2
        temp.append(string )
    return temp
def is_for_table(string1: str ,string2: str ,count: int ) -> bool:
    list1 = list(string1 )
    list2 = list(string2 )
    count_n = 0
    for i in range(len(list1 ) ):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count
def selection(chart: list[list[int]] ,prime_implicants: list[str] ) -> list[str]:
    temp = []
    select = [0] * len(chart )
    for i in range(len(chart[0] ) ):
        count = 0
        rem = -1
        for j in range(len(chart ) ):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select ) ):
        if select[i] == 1:
            for j in range(len(chart[0] ) ):
                if chart[i][j] == 1:
                    for k in range(len(chart ) ):
                        chart[k][j] = 0
            temp.append(prime_implicants[i] )
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart ) ):
            count_n = chart[i].count(1 )
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem] )
        for i in range(len(chart[0] ) ):
            if chart[rem][i] == 1:
                for j in range(len(chart ) ):
                    chart[j][i] = 0
def prime_implicant_chart(prime_implicants: list[str] ,binary: list[str] ) -> list[list[int]]:
    chart = [[0 for x in range(len(binary ) )] for x in range(len(prime_implicants ) )]
    for i in range(len(prime_implicants ) ):
        count = prime_implicants[i].count("_" )
        for j in range(len(binary ) ):
            if is_for_table(prime_implicants[i] ,binary[j] ,count ):
                chart[i][j] = 1
    return chart
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n" ) )
    minterms = [
        float(x )
        for x in input(
            "Enter the decimal representation of Minterms \'Spaces Separated\'\n" ).split()
    ]
    binary = decimal_to_binary(no_of_variable ,minterms )
    prime_implicants = check(binary )
    print("Prime Implicants are:" )
    print(prime_implicants )
    chart = prime_implicant_chart(prime_implicants ,binary )
    essential_prime_implicants = selection(chart ,prime_implicants )
    print("Essential Prime Implicants are:" )
    print(essential_prime_implicants )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 269 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two bit strings that differ in at most one position; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly merge adjacent implicants; the ones never merged are prime."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # A successful merge covers both inputs and yields a new implicant.
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp
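# Example (added for clarity): with 3 variables, minterm 5 -> "101" and
# minterm 2 -> "010", i.e. decimal_to_binary(3, [5, 2]) == ["101", "010"].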
def is_for_table(string1: str, string2: str, count: int) -> bool:
    """Return True when the two bit strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Select the essential prime implicants from the coverage chart."""
    temp = []
    select = [0] * len(chart)
    # A minterm column covered by exactly one row marks that row as essential.
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(select)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # Greedily pick whichever implicant covers the most remaining minterms.
    while True:
        max_n = 0
        rem = -1
        count_n = 0
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart
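# Added note: an implicant covers a minterm exactly when the two strings differ
# only at the implicant's "_" wildcard positions; since "_" never equals "0" or
# "1", that is the same as differing in precisely count("_") positions, which
# is what is_for_table tests.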
def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)

    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)

    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 0 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""junnyu/roformer_chinese_small""": """https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json""",
"""junnyu/roformer_chinese_base""": """https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json""",
"""junnyu/roformer_chinese_char_small""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json"""
),
"""junnyu/roformer_chinese_char_base""": (
"""https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json"""
),
"""junnyu/roformer_small_discriminator""": (
"""https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json"""
),
"""junnyu/roformer_small_generator""": (
"""https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json"""
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
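# Minimal usage sketch (added; assumes the surrounding `transformers` package
# layout makes these classes importable):
#
#     config = RoFormerConfig()               # defaults: 12 layers, hidden size 768
#     onnx_config = RoFormerOnnxConfig(config)
#     print(list(onnx_config.inputs))         # input_ids, attention_mask, token_type_ids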
| 52 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_import_structure = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_cpmant"] = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)


@pytest.mark.integration
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        # `datasets` raises a ConnectionError subclass when its offline flag is set.
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
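# Added note: the three tests above exercise the three offline simulation
# modes -- CONNECTION_TIMES_OUT, CONNECTION_FAILS, and
# HF_DATASETS_OFFLINE_SET_TO_1.  The behaviour glossed in each test (hanging
# sockets, immediate connection errors, library-level offline flag) is
# inferred from the enum names and the exceptions the tests expect.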
| 219 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class LayoutLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = LayoutLMTokenizer
    rust_tokenizer_class = LayoutLMTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_tokenizer(self, **kwargs):
        return LayoutLMTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_special_tokens_as_you_expect(self):  # name assumed; the original method was an empty placeholder
        pass
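# Hand-run example (added; restates what test_full_tokenizer asserts): with the
# toy vocab written in setUp, "UNwant\u00E9d,running" tokenizes to
# ["un", "##want", "##ed", ",", "runn", "##ing"], and those tokens map to the
# vocab ids [7, 4, 5, 10, 8, 9].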
| 89 | 0 |
"""simple docstring"""
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import Seq2SeqDataset, pickle_save


def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    """Save the length of each example in a Seq2SeqDataset to its len_file."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = Seq2SeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
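# Added usage sketch: fire.Fire exposes the function's parameters as CLI flags,
# so a plausible invocation (file name and paths are placeholders) looks like:
#
#     python save_len_file.py --tokenizer_name t5-small --data_dir ./wmt_en_ro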
| 255 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class ConditionalDetrConfig(PretrainedConfig):
    model_type = "conditional_detr"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        use_timm_backbone=True,
        backbone_config=None,
        num_channels=3,
        num_queries=300,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=2048,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        auxiliary_loss=False,
        position_embedding_type="sine",
        backbone="resnet50",
        use_pretrained_backbone=True,
        dilation=False,
        class_cost=2,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        cls_loss_coefficient=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is not None and use_timm_backbone:
            raise ValueError("You can't specify both `backbone_config` and `use_timm_backbone`.")

        if not use_timm_backbone:
            if backbone_config is None:
                logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
                backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage4"])
            elif isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.get("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.use_timm_backbone = use_timm_backbone
        self.backbone_config = backbone_config
        self.num_channels = num_channels
        self.num_queries = num_queries
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.num_hidden_layers = encoder_layers
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        self.backbone = backbone
        self.use_pretrained_backbone = use_pretrained_backbone
        self.dilation = dilation
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.cls_loss_coefficient = cls_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self) -> dict:
        output = copy.deepcopy(self.__dict__)
        if self.backbone_config is not None:
            output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output


class ConditionalDetrOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("pixel_mask", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5

    @property
    def default_onnx_opset(self) -> int:
        return 12
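# Minimal usage sketch (added; assumes the surrounding package is importable):
#
#     config = ConditionalDetrConfig()            # timm ResNet-50 backbone by default
#     onnx_config = ConditionalDetrOnnxConfig(config)
#     print(onnx_config.inputs)                   # pixel_values / pixel_mask dynamic axes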
| 89 | 0 |
'''simple docstring'''
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__="attention" ):
'''simple docstring'''
UpperCAmelCase : Tuple = params[F"{prefix}/layers_{i}/{layer_name}/key/kernel"]
UpperCAmelCase : int = params[F"{prefix}/layers_{i}/{layer_name}/out/kernel"]
UpperCAmelCase : List[str] = params[F"{prefix}/layers_{i}/{layer_name}/query/kernel"]
UpperCAmelCase : Any = params[F"{prefix}/layers_{i}/{layer_name}/value/kernel"]
return k, o, q, v
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__=False ):
'''simple docstring'''
if split_mlp_wi:
UpperCAmelCase : Optional[int] = params[F"{prefix}/layers_{i}/mlp/wi_0/kernel"]
UpperCAmelCase : Tuple = params[F"{prefix}/layers_{i}/mlp/wi_1/kernel"]
UpperCAmelCase : List[Any] = (wi_a, wi_a)
else:
UpperCAmelCase : Optional[Any] = params[F"{prefix}/layers_{i}/mlp/wi/kernel"]
UpperCAmelCase : Optional[Any] = params[F"{prefix}/layers_{i}/mlp/wo/kernel"]
return wi, wo
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
return params[F"{prefix}/layers_{i}/{layer_name}/scale"]
def lowercase ( __magic_name__ , *, __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = traverse_util.flatten_dict(variables["target"] )
UpperCAmelCase : Tuple = {'/'.join(lowerCAmelCase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
UpperCAmelCase : Any = 'encoder/layers_0/mlp/wi_0/kernel' in old
print("Split MLP:" , lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = collections.OrderedDict()
# Shared embeddings.
UpperCAmelCase : Optional[Any] = old['token_embedder/embedding']
# Encoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase : int = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "encoder" , "pre_attention_layer_norm" )
UpperCAmelCase : Optional[int] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "encoder" , "attention" )
UpperCAmelCase : List[str] = layer_norm
UpperCAmelCase : List[str] = k.T
UpperCAmelCase : Optional[int] = o.T
UpperCAmelCase : List[Any] = q.T
UpperCAmelCase : List[Any] = v.T
# Block i, layer 1 (MLP).
UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "encoder" , "pre_mlp_layer_norm" )
UpperCAmelCase : List[str] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "encoder" , lowerCAmelCase_ )
UpperCAmelCase : Union[str, Any] = layer_norm
if split_mlp_wi:
UpperCAmelCase : int = wi[0].T
UpperCAmelCase : List[Any] = wi[1].T
else:
UpperCAmelCase : Any = wi.T
UpperCAmelCase : Tuple = wo.T
UpperCAmelCase : int = old[
'encoder/relpos_bias/rel_embedding'
].T
UpperCAmelCase : Optional[Any] = old['encoder/encoder_norm/scale']
if not is_encoder_only:
# Decoder.
for i in range(lowerCAmelCase_ ):
# Block i, layer 0 (Self Attention).
UpperCAmelCase : Tuple = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , "pre_self_attention_layer_norm" )
UpperCAmelCase : Optional[int] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , "self_attention" )
UpperCAmelCase : Tuple = layer_norm
UpperCAmelCase : Dict = k.T
UpperCAmelCase : Optional[Any] = o.T
UpperCAmelCase : Any = q.T
UpperCAmelCase : int = v.T
# Block i, layer 1 (Cross Attention).
UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , "pre_cross_attention_layer_norm" )
UpperCAmelCase : List[str] = tax_attention_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , "encoder_decoder_attention" )
UpperCAmelCase : Optional[Any] = layer_norm
UpperCAmelCase : Any = k.T
UpperCAmelCase : int = o.T
UpperCAmelCase : str = q.T
UpperCAmelCase : List[Any] = v.T
# Block i, layer 2 (MLP).
UpperCAmelCase : Union[str, Any] = tax_layer_norm_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , "pre_mlp_layer_norm" )
UpperCAmelCase : Union[str, Any] = tax_mlp_lookup(lowerCAmelCase_ , lowerCAmelCase_ , "decoder" , lowerCAmelCase_ )
UpperCAmelCase : List[str] = layer_norm
if split_mlp_wi:
UpperCAmelCase : List[Any] = wi[0].T
UpperCAmelCase : List[Any] = wi[1].T
else:
UpperCAmelCase : Optional[Any] = wi.T
UpperCAmelCase : Optional[int] = wo.T
UpperCAmelCase : List[Any] = old['decoder/decoder_norm/scale']
UpperCAmelCase : Any = old[
'decoder/relpos_bias/rel_embedding'
].T
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
UpperCAmelCase : Dict = old['decoder/logits_dense/kernel'].T
return new
def lowercase ( __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Tuple = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase : List[Any] = state_dict['shared.weight']
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
UpperCAmelCase : List[str] = state_dict['shared.weight']
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
UpperCAmelCase : Optional[int] = state_dict['shared.weight']
return state_dict
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = checkpoints.load_tax_checkpoint(lowerCAmelCase_ )
UpperCAmelCase : str = convert_tax_to_pytorch(lowerCAmelCase_ , num_layers=config.num_layers , is_encoder_only=lowerCAmelCase_ )
UpperCAmelCase : List[Any] = make_state_dict(lowerCAmelCase_ , lowerCAmelCase_ )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
def lowercase ( __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = False ):
'''simple docstring'''
UpperCAmelCase : int = TaConfig.from_json_file(lowerCAmelCase_ )
print(F"Building PyTorch model from configuration: {config}" )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
UpperCAmelCase : str = TaEncoderModel(lowerCAmelCase_ )
else:
UpperCAmelCase : Tuple = TaForConditionalGeneration(lowerCAmelCase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Save pytorch-model
print(F"Save PyTorch model to {pytorch_dump_path}" )
model.save_pretrained(lowerCAmelCase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowerCAmelCase_ )
print("Done" )
if __name__ == "__main__":
a : List[Any] = argparse.ArgumentParser(description="Converts a native T5X checkpoint into a PyTorch checkpoint.")
# Required parameters
parser.add_argument(
"--t5x_checkpoint_path", default=None, type=str, required=True, help="Path to the T5X checkpoint."
)
parser.add_argument(
"--config_file",
default=None,
type=str,
required=True,
help="The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.",
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--is_encoder_only", action="store_true", help="Check if the model is encoder-decoder model", default=False
)
a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
args.tax_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
)
| 311 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
| 89 | 0 |
'''simple docstring'''
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import f1_score, matthews_corrcoef
import datasets
_CITATION = '\\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n'
_DESCRIPTION = '\\nGLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n'
_KWARGS_DESCRIPTION = '\nCompute GLUE evaluation metric associated to each GLUE dataset.\nArgs:\n predictions: list of predictions to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\nReturns: depending on the GLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "pearson": Pearson Correlation\n "spearmanr": Spearman Correlation\n "matthews_correlation": Matthew Correlation\nExamples:\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'sst2\') # \'sst2\' or any of ["mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'mrpc\') # \'mrpc\' or \'qqp\'\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'stsb\')\n >>> references = [0., 1., 2., 3., 4., 5.]\n >>> predictions = [0., 1., 2., 3., 4., 5.]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print({"pearson": round(results["pearson"], 2), "spearmanr": round(results["spearmanr"], 2)})\n {\'pearson\': 1.0, \'spearmanr\': 1.0}\n\n >>> glue_metric = datasets.load_metric(\'glue\', \'cola\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'matthews_correlation\': 1.0}\n'
def simple_accuracy(preds, labels):
    return float((preds == labels).mean())


def acc_and_f1(preds, labels):
    acc = simple_accuracy(preds, labels)
    f1 = float(f1_score(y_true=labels, y_pred=preds))
    return {
        "accuracy": acc,
        "f1": f1,
    }


def pearson_and_spearman(preds, labels):
    pearson_corr = float(pearsonr(preds, labels)[0])
    spearman_corr = float(spearmanr(preds, labels)[0])
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
    }
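# Added example: with preds = np.array([0.0, 1.0, 2.0]) and labels equal to
# preds, pearson_and_spearman returns {"pearson": 1.0, "spearmanr": 1.0}; the
# module docstring above shows the same behaviour through the metric API.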
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Glue(datasets.Metric):
    def _info(self):
        if self.config_name not in [
            "sst2",
            "mnli",
            "mnli_mismatched",
            "mnli_matched",
            "cola",
            "stsb",
            "mrpc",
            "qqp",
            "qnli",
            "rte",
            "wnli",
            "hans",
        ]:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            )
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                    "references": datasets.Value("int64" if self.config_name != "stsb" else "float32"),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format="numpy",
        )

    def _compute(self, predictions, references):
        if self.config_name == "cola":
            return {"matthews_correlation": matthews_corrcoef(references, predictions)}
        elif self.config_name == "stsb":
            return pearson_and_spearman(predictions, references)
        elif self.config_name in ["mrpc", "qqp"]:
            return acc_and_f1(predictions, references)
        elif self.config_name in ["sst2", "mnli", "mnli_mismatched", "mnli_matched", "qnli", "rte", "wnli", "hans"]:
            return {"accuracy": simple_accuracy(predictions, references)}
        else:
            raise KeyError(
                "You should supply a configuration name selected in "
                '["sst2", "mnli", "mnli_mismatched", "mnli_matched", '
                '"cola", "stsb", "mrpc", "qqp", "qnli", "rte", "wnli", "hans"]'
            ) | 152 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_lilt"] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 89 | 0 |
import argparse
import json
import os
import torch
from transformers.file_utils import has_file
from diffusers import UNetaDConditionModel, UNetaDModel
lowerCAmelCase__ : Optional[Any] = False
lowerCAmelCase__ : Any = True
lowerCAmelCase__ : Any = False
if __name__ == "__main__":
lowerCAmelCase__ : Tuple = argparse.ArgumentParser()
parser.add_argument(
'''--repo_path''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
lowerCAmelCase__ : Tuple = parser.parse_args()
lowerCAmelCase__ : str = {
'''image_size''': '''sample_size''',
'''num_res_blocks''': '''layers_per_block''',
'''block_channels''': '''block_out_channels''',
'''down_blocks''': '''down_block_types''',
'''up_blocks''': '''up_block_types''',
'''downscale_freq_shift''': '''freq_shift''',
'''resnet_num_groups''': '''norm_num_groups''',
'''resnet_act_fn''': '''act_fn''',
'''resnet_eps''': '''norm_eps''',
'''num_head_channels''': '''attention_head_dim''',
}
lowerCAmelCase__ : Any = {
'''time_steps''': '''time_proj''',
'''mid''': '''mid_block''',
'''downsample_blocks''': '''down_blocks''',
'''upsample_blocks''': '''up_blocks''',
}
lowerCAmelCase__ : Union[str, Any] = '''''' if has_file(args.repo_path, '''config.json''') else '''unet'''
with open(os.path.join(args.repo_path, subfolder, '''config.json'''), '''r''', encoding='''utf-8''') as reader:
lowerCAmelCase__ : Optional[Any] = reader.read()
lowerCAmelCase__ : List[str] = json.loads(text)
if do_only_config:
for key in config_parameters_to_change.keys():
config.pop(key, None)
if has_file(args.repo_path, '''config.json'''):
lowerCAmelCase__ : List[Any] = UNetaDModel(**config)
else:
lowerCAmelCase__ : Tuple = UNetaDConditionModel if '''ldm-text2im-large-256''' in args.repo_path else UNetaDModel
lowerCAmelCase__ : str = class_name(**config)
if do_only_config:
model.save_config(os.path.join(args.repo_path, subfolder))
lowerCAmelCase__ : List[Any] = dict(model.config)
if do_only_renaming:
for key, value in config_parameters_to_change.items():
if key in config:
lowerCAmelCase__ : List[Any] = config[key]
del config[key]
lowerCAmelCase__ : Union[str, Any] = [k.replace('''UNetRes''', '''''') for k in config['''down_block_types''']]
lowerCAmelCase__ : Tuple = [k.replace('''UNetRes''', '''''') for k in config['''up_block_types''']]
if do_only_weights:
lowerCAmelCase__ : int = torch.load(os.path.join(args.repo_path, subfolder, '''diffusion_pytorch_model.bin'''))
lowerCAmelCase__ : Union[str, Any] = {}
for param_key, param_value in state_dict.items():
if param_key.endswith('''.op.bias''') or param_key.endswith('''.op.weight'''):
continue
lowerCAmelCase__ : Any = False
for key, new_key in key_parameters_to_change.items():
if not has_changed and param_key.split('''.''')[0] == key:
lowerCAmelCase__ : int = param_value
lowerCAmelCase__ : Tuple = True
if not has_changed:
lowerCAmelCase__ : int = param_value
model.load_state_dict(new_state_dict)
model.save_pretrained(os.path.join(args.repo_path, subfolder))
| 143 |
'''simple docstring'''
import math
def check_partition_perfect(positive_integer: int) -> bool:
    exponent = math.log2(math.sqrt(4 * positive_integer + 1) / 2 + 1 / 2)
    return exponent == int(exponent)


def solution(max_proportion: float = 1 / 12345) -> int:
    total_partitions = 0
    perfect_partitions = 0
    integer = 3
    while True:
        partition_candidate = (integer**2 - 1) / 4
        # if candidate is an integer, then there is a partition for k
        if partition_candidate == int(partition_candidate):
            partition_candidate = int(partition_candidate)
            total_partitions += 1
            if check_partition_perfect(partition_candidate):
                perfect_partitions += 1
        if perfect_partitions > 0:
            if perfect_partitions / total_partitions < max_proportion:
                return int(partition_candidate)
        integer += 1
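# Added note on the math: for odd n the candidate k = (n**2 - 1) / 4 equals
# x**2 - x with x = (n + 1) / 2, so each k admits a partition 4**t = 2**t + k
# with 2**t = x.  check_partition_perfect recovers x from k via the quadratic
# formula and tests whether log2(x) is an integer -- the "perfect partition"
# condition of Project Euler problem 207, whose 1/12345 threshold appears above.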
if __name__ == "__main__":
print(f"""{solution() = }""")
| 89 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
T = TypeVar("T")


class Node(Generic[T]):
    def __init__(self, data: T):
        self.data = data
        self.next: Node[T] | None = None

    def __str__(self) -> str:
        return f"{self.data}"


class LinkedStack(Generic[T]):
    """LIFO stack backed by a singly linked list; push/pop/peek are O(1)."""

    def __init__(self) -> None:
        self.top: Node[T] | None = None

    def __iter__(self) -> Iterator[T]:
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self) -> str:
        return "->".join([str(item) for item in self])

    def __len__(self) -> int:
        return len(tuple(iter(self)))

    def is_empty(self) -> bool:
        return self.top is None

    def push(self, item: T) -> None:
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self) -> T:
        if self.is_empty():
            raise IndexError("pop from empty stack")
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self) -> T:
        if self.is_empty():
            raise IndexError("peek from empty stack")
        assert self.top is not None
        return self.top.data

    def clear(self) -> None:
        self.top = None
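# Added usage example:
#
#     stack: LinkedStack[int] = LinkedStack()
#     stack.push(1)
#     stack.push(2)
#     assert str(stack) == "2->1" and stack.pop() == 2 and stack.peek() == 1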
if __name__ == "__main__":
from doctest import testmod
testmod()
| 39 |
'''simple docstring'''
import argparse
import json
import torch
from diffusers import DDPMScheduler, LDMPipeline, UNetaDModel, VQModel
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=1 ) -> Dict:
if n_shave_prefix_segments >= 0:
return ".".join(path.split('.' )[n_shave_prefix_segments:] )
else:
return ".".join(path.split('.' )[:n_shave_prefix_segments] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Tuple:
_a : Any = []
for old_item in old_list:
_a : Union[str, Any] = old_item.replace('in_layers.0' , 'norm1' )
_a : Optional[int] = new_item.replace('in_layers.2' , 'conv1' )
_a : str = new_item.replace('out_layers.0' , 'norm2' )
_a : List[str] = new_item.replace('out_layers.3' , 'conv2' )
_a : str = new_item.replace('emb_layers.1' , 'time_emb_proj' )
_a : Tuple = new_item.replace('skip_connection' , 'conv_shortcut' )
_a : Any = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=0 ) -> Any:
_a : List[str] = []
for old_item in old_list:
_a : List[Any] = old_item
_a : Optional[int] = new_item.replace('norm.weight' , 'group_norm.weight' )
_a : Optional[Any] = new_item.replace('norm.bias' , 'group_norm.bias' )
_a : Any = new_item.replace('proj_out.weight' , 'proj_attn.weight' )
_a : Optional[Any] = new_item.replace('proj_out.bias' , 'proj_attn.bias' )
_a : Optional[int] = shave_segments(lowerCAmelCase_ , n_shave_prefix_segments=lowerCAmelCase_ )
mapping.append({'old': old_item, 'new': new_item} )
return mapping
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=None , lowerCAmelCase_=None , lowerCAmelCase_=None ) -> Any:
assert isinstance(lowerCAmelCase_ , lowerCAmelCase_ ), "Paths should be a list of dicts containing 'old' and 'new' keys."
# Splits the attention layers into three variables.
if attention_paths_to_split is not None:
for path, path_map in attention_paths_to_split.items():
_a : Optional[Any] = old_checkpoint[path]
_a : Optional[Any] = old_tensor.shape[0] // 3
_a : Any = (-1, channels) if len(old_tensor.shape ) == 3 else (-1)
_a : int = old_tensor.shape[0] // config['num_head_channels'] // 3
_a : str = old_tensor.reshape((num_heads, 3 * channels // num_heads) + old_tensor.shape[1:] )
_a , _a , _a : Tuple = old_tensor.split(channels // num_heads , dim=1 )
_a : Dict = query.reshape(lowerCAmelCase_ )
_a : str = key.reshape(lowerCAmelCase_ )
_a : Optional[int] = value.reshape(lowerCAmelCase_ )
for path in paths:
_a : Dict = path['new']
# These have already been assigned
if attention_paths_to_split is not None and new_path in attention_paths_to_split:
continue
# Global renaming happens here
_a : Any = new_path.replace('middle_block.0' , 'mid_block.resnets.0' )
_a : str = new_path.replace('middle_block.1' , 'mid_block.attentions.0' )
_a : Union[str, Any] = new_path.replace('middle_block.2' , 'mid_block.resnets.1' )
if additional_replacements is not None:
for replacement in additional_replacements:
_a : int = new_path.replace(replacement['old'] , replacement['new'] )
# proj_attn.weight has to be converted from conv 1D to linear
if "proj_attn.weight" in new_path:
_a : List[str] = old_checkpoint[path['old']][:, :, 0]
else:
_a : Dict = old_checkpoint[path['old']]
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> List[Any]:
_a : Optional[int] = {}
_a : Dict = checkpoint['time_embed.0.weight']
_a : Tuple = checkpoint['time_embed.0.bias']
_a : Union[str, Any] = checkpoint['time_embed.2.weight']
_a : List[str] = checkpoint['time_embed.2.bias']
_a : List[str] = checkpoint['input_blocks.0.0.weight']
_a : Union[str, Any] = checkpoint['input_blocks.0.0.bias']
_a : Optional[int] = checkpoint['out.0.weight']
_a : int = checkpoint['out.0.bias']
_a : List[str] = checkpoint['out.2.weight']
_a : Optional[int] = checkpoint['out.2.bias']
# Retrieves the keys for the input blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'input_blocks' in layer} )
_a : Dict = {
layer_id: [key for key in checkpoint if f"""input_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the middle blocks only
_a : List[Any] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'middle_block' in layer} )
_a : Union[str, Any] = {
layer_id: [key for key in checkpoint if f"""middle_block.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
# Retrieves the keys for the output blocks only
_a : Optional[int] = len({'.'.join(layer.split('.' )[:2] ) for layer in checkpoint if 'output_blocks' in layer} )
_a : str = {
layer_id: [key for key in checkpoint if f"""output_blocks.{layer_id}""" in key]
for layer_id in range(lowerCAmelCase_ )
}
for i in range(1 , lowerCAmelCase_ ):
_a : List[Any] = (i - 1) // (config['num_res_blocks'] + 1)
_a : Optional[int] = (i - 1) % (config['num_res_blocks'] + 1)
_a : Optional[int] = [key for key in input_blocks[i] if f"""input_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in input_blocks[i] if f"""input_blocks.{i}.1""" in key]
if f"""input_blocks.{i}.0.op.weight""" in checkpoint:
_a : List[Any] = checkpoint[
f"""input_blocks.{i}.0.op.weight"""
]
_a : Union[str, Any] = checkpoint[
f"""input_blocks.{i}.0.op.bias"""
]
continue
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
_a : List[str] = {'old': f"""input_blocks.{i}.0""", 'new': f"""down_blocks.{block_id}.resnets.{layer_in_block_id}"""}
_a : Optional[Any] = {'old': 'resnets.2.op', 'new': 'downsamplers.0.op'}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path, resnet_op] , config=lowerCAmelCase_ )
if len(lowerCAmelCase_ ):
_a : List[str] = renew_attention_paths(lowerCAmelCase_ )
_a : List[Any] = {
'old': f"""input_blocks.{i}.1""",
'new': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : Optional[Any] = {
f"""input_blocks.{i}.1.qkv.bias""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""input_blocks.{i}.1.qkv.weight""": {
'key': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""down_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ , )
_a : str = middle_blocks[0]
_a : Tuple = middle_blocks[1]
_a : Any = middle_blocks[2]
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : Any = renew_resnet_paths(lowerCAmelCase_ )
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , config=lowerCAmelCase_ )
_a : int = renew_attention_paths(lowerCAmelCase_ )
_a : int = {
'middle_block.1.qkv.bias': {
'key': 'mid_block.attentions.0.key.bias',
'query': 'mid_block.attentions.0.query.bias',
'value': 'mid_block.attentions.0.value.bias',
},
'middle_block.1.qkv.weight': {
'key': 'mid_block.attentions.0.key.weight',
'query': 'mid_block.attentions.0.query.weight',
'value': 'mid_block.attentions.0.value.weight',
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , attention_paths_to_split=lowerCAmelCase_ , config=lowerCAmelCase_ )
for i in range(lowerCAmelCase_ ):
_a : List[str] = i // (config['num_res_blocks'] + 1)
_a : Any = i % (config['num_res_blocks'] + 1)
_a : Union[str, Any] = [shave_segments(lowerCAmelCase_ , 2 ) for name in output_blocks[i]]
_a : Optional[Any] = {}
for layer in output_block_layers:
_a , _a : str = layer.split('.' )[0], shave_segments(lowerCAmelCase_ , 1 )
if layer_id in output_block_list:
output_block_list[layer_id].append(lowerCAmelCase_ )
else:
_a : str = [layer_name]
if len(lowerCAmelCase_ ) > 1:
_a : str = [key for key in output_blocks[i] if f"""output_blocks.{i}.0""" in key]
_a : Optional[Any] = [key for key in output_blocks[i] if f"""output_blocks.{i}.1""" in key]
_a : Dict = renew_resnet_paths(lowerCAmelCase_ )
_a : str = renew_resnet_paths(lowerCAmelCase_ )
_a : Optional[int] = {'old': f"""output_blocks.{i}.0""", 'new': f"""up_blocks.{block_id}.resnets.{layer_in_block_id}"""}
assign_to_checkpoint(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , config=lowerCAmelCase_ )
if ["conv.weight", "conv.bias"] in output_block_list.values():
_a : List[Any] = list(output_block_list.values() ).index(['conv.weight', 'conv.bias'] )
_a : Tuple = checkpoint[
f"""output_blocks.{i}.{index}.conv.weight"""
]
_a : List[str] = checkpoint[
f"""output_blocks.{i}.{index}.conv.bias"""
]
# Clear attentions as they have been attributed above.
if len(lowerCAmelCase_ ) == 2:
_a : Union[str, Any] = []
if len(lowerCAmelCase_ ):
_a : Tuple = renew_attention_paths(lowerCAmelCase_ )
_a : str = {
'old': f"""output_blocks.{i}.1""",
'new': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}""",
}
_a : List[Any] = {
f"""output_blocks.{i}.1.qkv.bias""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.bias""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.bias""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.bias""",
},
f"""output_blocks.{i}.1.qkv.weight""": {
'key': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.key.weight""",
'query': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.query.weight""",
'value': f"""up_blocks.{block_id}.attentions.{layer_in_block_id}.value.weight""",
},
}
assign_to_checkpoint(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , additional_replacements=[meta_path] , attention_paths_to_split=to_split if any('qkv' in key for key in attentions ) else None , config=lowerCAmelCase_ , )
else:
_a : List[Any] = renew_resnet_paths(lowerCAmelCase_ , n_shave_prefix_segments=1 )
for path in resnet_0_paths:
_a : int = '.'.join(['output_blocks', str(lowerCAmelCase_ ), path['old']] )
_a : Union[str, Any] = '.'.join(['up_blocks', str(lowerCAmelCase_ ), 'resnets', str(lowerCAmelCase_ ), path['new']] )
_a : Union[str, Any] = checkpoint[old_path]
return new_checkpoint
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, required=True, help='''Path to the checkpoint to convert.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the architecture.''',
)
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the output model.''')
__lowerCAmelCase = parser.parse_args()
__lowerCAmelCase = torch.load(args.checkpoint_path)
with open(args.config_file) as f:
__lowerCAmelCase = json.loads(f.read())
__lowerCAmelCase = convert_ldm_checkpoint(checkpoint, config)
if "ldm" in config:
del config["ldm"]
__lowerCAmelCase = UNetaDModel(**config)
model.load_state_dict(converted_checkpoint)
try:
__lowerCAmelCase = DDPMScheduler.from_config('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = VQModel.from_pretrained('''/'''.join(args.checkpoint_path.split('''/''')[:-1]))
__lowerCAmelCase = LDMPipeline(unet=model, scheduler=scheduler, vae=vqvae)
pipe.save_pretrained(args.dump_path)
except: # noqa: E722
model.save_pretrained(args.dump_path)
| 89 | 0 |
from __future__ import annotations
class A :
'''simple docstring'''
def __init__( self : List[str] , __lowerCAmelCase : Union[str, Any]=None ) -> str:
"""simple docstring"""
A__ = data
A__ = None
def __repr__( self : str ) -> List[Any]:
"""simple docstring"""
A__ = []
A__ = self
while temp:
string_rep.append(f'{temp.data}' )
A__ = temp.next
return "->".join(_UpperCAmelCase )
def __lowerCamelCase ( __a :List[str] ) -> Tuple:
"""simple docstring"""
if not elements_list:
raise Exception("""The Elements List is empty""" )
A__ = Node(elements_list[0] )
for i in range(1 , len(lowerCAmelCase_ ) ):
A__ = Node(elements_list[i] )
A__ = current.next
return head
def __lowerCamelCase ( __a :Optional[int] ) -> None:
"""simple docstring"""
if head_node is not None and isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
print_reverse(head_node.next )
print(head_node.data )
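# Editor's note: the recursive call above runs before the print, so the tail
# node is printed first — that post-order position is what reverses the output.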
def __lowerCamelCase ( ) -> List[str]:
"""simple docstring"""
from doctest import testmod
testmod()
A__ = make_linked_list([1_4, 5_2, 1_4, 1_2, 4_3] )
print("""Linked List:""" )
print(lowerCAmelCase_ )
print("""Elements in Reverse:""" )
print_reverse(lowerCAmelCase_ )
if __name__ == "__main__":
main()
| 274 |
'''simple docstring'''
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> np.array:
_a : Optional[int] = f"""{sampling_rate}"""
_a : Any = '1'
_a : Optional[int] = 'f32le'
_a : Any = [
'ffmpeg',
'-i',
'pipe:0',
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
try:
with subprocess.Popen(lowerCAmelCase_ , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
_a : int = ffmpeg_process.communicate(lowerCAmelCase_ )
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to load audio files from filename' ) from error
_a : int = output_stream[0]
_a : List[str] = np.frombuffer(lowerCAmelCase_ , np.floataa )
if audio.shape[0] == 0:
raise ValueError('Malformed soundfile' )
return audio
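# For reference (editor's addition): the subprocess above is equivalent to the
# shell pipeline, e.g. for sampling_rate=16000:
#   ffmpeg -i pipe:0 -ac 1 -ar 16000 -f f32le -hide_banner -loglevel quiet pipe:1
# i.e. decode from stdin, downmix to mono, resample, and emit raw float32 PCM
# that np.frombuffer can reinterpret directly.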
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = "f32le" , ) -> Union[str, Any]:
_a : List[str] = f"""{sampling_rate}"""
_a : List[str] = '1'
if format_for_conversion == "s16le":
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Dict = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
_a : Any = platform.system()
if system == "Linux":
_a : Union[str, Any] = 'alsa'
_a : Union[str, Any] = 'default'
elif system == "Darwin":
_a : Any = 'avfoundation'
_a : Optional[int] = ':0'
elif system == "Windows":
_a : str = 'dshow'
_a : Tuple = 'default'
_a : str = [
'ffmpeg',
'-f',
format_,
'-i',
input_,
'-ac',
ac,
'-ar',
ar,
'-f',
format_for_conversion,
'-fflags',
'nobuffer',
'-hide_banner',
'-loglevel',
'quiet',
'pipe:1',
]
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
_a : Union[str, Any] = _ffmpeg_stream(lowerCAmelCase_ , lowerCAmelCase_ )
for item in iterator:
yield item
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = "f32le" , ) -> str:
if stream_chunk_s is not None:
_a : str = stream_chunk_s
else:
_a : List[str] = chunk_length_s
_a : int = ffmpeg_microphone(lowerCAmelCase_ , lowerCAmelCase_ , format_for_conversion=lowerCAmelCase_ )
if format_for_conversion == "s16le":
_a : Optional[Any] = np.intaa
_a : List[Any] = 2
elif format_for_conversion == "f32le":
_a : Tuple = np.floataa
_a : Any = 4
else:
raise ValueError(f"""Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`""" )
if stride_length_s is None:
_a : str = chunk_length_s / 6
_a : Any = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(lowerCAmelCase_ , (int, float) ):
_a : List[str] = [stride_length_s, stride_length_s]
_a : str = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
_a : List[str] = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
_a : Any = datetime.datetime.now()
_a : Dict = datetime.timedelta(seconds=lowerCAmelCase_ )
for item in chunk_bytes_iter(lowerCAmelCase_ , lowerCAmelCase_ , stride=(stride_left, stride_right) , stream=lowerCAmelCase_ ):
# Put everything back in numpy scale
_a : List[Any] = np.frombuffer(item['raw'] , dtype=lowerCAmelCase_ )
_a : List[str] = (
item['stride'][0] // size_of_sample,
item['stride'][1] // size_of_sample,
)
_a : Union[str, Any] = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def chunk_bytes_iter( iterator , chunk_len , stride , stream = False ) -> List[Any]:
acc = B''
stride_left , stride_right = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f"""Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}""" )
_stride_left = 0
for raw in iterator:
acc += raw
if stream and len(lowerCAmelCase_ ) < chunk_len:
stride = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(lowerCAmelCase_ ) >= chunk_len:
# We are flushing the accumulator
stride = (_stride_left, stride_right)
item = {'raw': acc[:chunk_len], 'stride': stride}
if stream:
item['partial'] = False
yield item
_stride_left = stride_left
acc = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(lowerCAmelCase_ ) > stride_left:
item = {'raw': acc, 'stride': (_stride_left, 0)}
if stream:
item['partial'] = False
yield item
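# Editor's sketch of the chunking contract (illustrative values):
#   chunks = list(chunk_bytes_iter(iter([b"0123456789"]), 6, stride=(0, 2)))
#   # -> {"raw": b"012345", "stride": (0, 2)}
#   #    {"raw": b"456789", "stride": (0, 2)}   # next window rewinds by 2 bytes
#   #    {"raw": b"89",     "stride": (0, 0)}   # trailing remainder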
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Tuple:
_a : Optional[Any] = 2**24 # 16Mo
try:
with subprocess.Popen(lowerCAmelCase_ , stdout=subprocess.PIPE , bufsize=lowerCAmelCase_ ) as ffmpeg_process:
while True:
_a : Any = ffmpeg_process.stdout.read(lowerCAmelCase_ )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError('ffmpeg was not found but is required to stream audio files from filename' ) from error
| 89 | 0 |
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
__lowerCamelCase : List[str] = logging.getLogger(__name__)
def _snake_case ( lowerCAmelCase : Tuple , lowerCAmelCase : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = np.argmax(lowerCAmelCase_ , axis=1 )
return np.sum(outputs == labels )
def _snake_case ( lowerCAmelCase : Tuple ):
"""simple docstring"""
with open(lowerCAmelCase_ , encoding="utf_8" ) as f:
SCREAMING_SNAKE_CASE_ : Optional[Any] = csv.reader(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = []
next(lowerCAmelCase_ ) # skip the first line
for line in tqdm(lowerCAmelCase_ ):
output.append((" ".join(line[1:5] ), line[5], line[6], int(line[-1] ) - 1) )
return output
def _snake_case ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : Tuple , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : Union[str, Any] = []
for dataset in encoded_datasets:
SCREAMING_SNAKE_CASE_ : Any = len(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : str = np.zeros((n_batch, 2, input_len) , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Dict = np.zeros((n_batch, 2) , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : List[str] = np.full((n_batch, 2, input_len) , fill_value=-1_0_0 , dtype=np.intaa )
SCREAMING_SNAKE_CASE_ : Any = np.zeros((n_batch,) , dtype=np.intaa )
for i, (story, conta, conta, mc_label) in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : int = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE_ : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
SCREAMING_SNAKE_CASE_ : List[Any] = with_conta
SCREAMING_SNAKE_CASE_ : Dict = with_conta
SCREAMING_SNAKE_CASE_ : int = len(lowerCAmelCase_ ) - 1
SCREAMING_SNAKE_CASE_ : Optional[int] = len(lowerCAmelCase_ ) - 1
SCREAMING_SNAKE_CASE_ : str = with_conta
SCREAMING_SNAKE_CASE_ : Any = with_conta
SCREAMING_SNAKE_CASE_ : Union[str, Any] = mc_label
SCREAMING_SNAKE_CASE_ : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(lowerCAmelCase_ ) for t in all_inputs ) )
return tensor_datasets
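# Editor's note: each multiple-choice candidate above is laid out as
#   [_start_] story [_delimiter_] continuation [_classify_]
# with every segment capped at cap_length tokens; mc_token_ids marks the final
# _classify_ position, which the double-heads model uses for classification.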
def _snake_case ( ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ : List[str] = argparse.ArgumentParser()
parser.add_argument("--model_name" , type=lowerCAmelCase_ , default="openai-gpt" , help="pretrained model name" )
parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
parser.add_argument("--do_eval" , action="store_true" , help="Whether to run eval on the dev set." )
parser.add_argument(
"--output_dir" , default=lowerCAmelCase_ , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help="The output directory where the model predictions and checkpoints will be written." , )
parser.add_argument("--train_dataset" , type=lowerCAmelCase_ , default="" )
parser.add_argument("--eval_dataset" , type=lowerCAmelCase_ , default="" )
parser.add_argument("--seed" , type=lowerCAmelCase_ , default=4_2 )
parser.add_argument("--num_train_epochs" , type=lowerCAmelCase_ , default=3 )
parser.add_argument("--train_batch_size" , type=lowerCAmelCase_ , default=8 )
parser.add_argument("--eval_batch_size" , type=lowerCAmelCase_ , default=1_6 )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=lowerCAmelCase_ , help="Epsilon for Adam optimizer." )
parser.add_argument("--max_grad_norm" , type=lowerCAmelCase_ , default=1 )
parser.add_argument(
"--max_steps" , default=-1 , type=lowerCAmelCase_ , help=(
"If > 0: set total number of training steps to perform. Override num_train_epochs."
) , )
parser.add_argument(
"--gradient_accumulation_steps" , type=lowerCAmelCase_ , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
parser.add_argument("--learning_rate" , type=lowerCAmelCase_ , default=6.2_5E-5 )
parser.add_argument("--warmup_steps" , default=0 , type=lowerCAmelCase_ , help="Linear warmup over warmup_steps." )
parser.add_argument("--lr_schedule" , type=lowerCAmelCase_ , default="warmup_linear" )
parser.add_argument("--weight_decay" , type=lowerCAmelCase_ , default=0.01 )
parser.add_argument("--lm_coef" , type=lowerCAmelCase_ , default=0.9 )
parser.add_argument("--n_valid" , type=lowerCAmelCase_ , default=3_7_4 )
parser.add_argument("--server_ip" , type=lowerCAmelCase_ , default="" , help="Can be used for distant debugging." )
parser.add_argument("--server_port" , type=lowerCAmelCase_ , default="" , help="Can be used for distant debugging." )
SCREAMING_SNAKE_CASE_ : str = parser.parse_args()
print(lowerCAmelCase_ )
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("Waiting for debugger attach" )
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCAmelCase_ )
ptvsd.wait_for_attach()
random.seed(args.seed )
np.random.seed(args.seed )
torch.manual_seed(args.seed )
torch.cuda.manual_seed_all(args.seed )
SCREAMING_SNAKE_CASE_ : List[Any] = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
SCREAMING_SNAKE_CASE_ : Dict = torch.cuda.device_count()
logger.info("device: {}, n_gpu {}".format(lowerCAmelCase_ , lowerCAmelCase_ ) )
if not args.do_train and not args.do_eval:
raise ValueError("At least one of `do_train` or `do_eval` must be True." )
if not os.path.exists(args.output_dir ):
os.makedirs(args.output_dir )
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
SCREAMING_SNAKE_CASE_ : List[Any] = ['_start_', '_delimiter_', '_classify_']
SCREAMING_SNAKE_CASE_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.model_name )
tokenizer.add_tokens(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Dict = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name )
model.resize_token_embeddings(len(lowerCAmelCase_ ) )
model.to(lowerCAmelCase_ )
# Load and encode the datasets
def tokenize_and_encode(lowerCAmelCase : int ):
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCAmelCase_ ) )
elif isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return obj
return [tokenize_and_encode(lowerCAmelCase_ ) for o in obj]
logger.info("Encoding dataset..." )
SCREAMING_SNAKE_CASE_ : str = load_rocstories_dataset(args.train_dataset )
SCREAMING_SNAKE_CASE_ : Dict = load_rocstories_dataset(args.eval_dataset )
SCREAMING_SNAKE_CASE_ : List[Any] = (train_dataset, eval_dataset)
SCREAMING_SNAKE_CASE_ : Dict = tokenize_and_encode(lowerCAmelCase_ )
# Compute the max input length for the Transformer
SCREAMING_SNAKE_CASE_ : Optional[Any] = model.config.n_positions // 2 - 2
SCREAMING_SNAKE_CASE_ : List[Any] = max(
len(story[:max_length] ) + max(len(conta[:max_length] ) , len(conta[:max_length] ) ) + 3
for dataset in encoded_datasets
for story, conta, conta, _ in dataset )
SCREAMING_SNAKE_CASE_ : str = min(lowerCAmelCase_ , model.config.n_positions ) # Max size of input for the pre-trained model
# Prepare inputs tensors and dataloaders
SCREAMING_SNAKE_CASE_ : Any = pre_process_datasets(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , *lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[Any] = tensor_datasets[0], tensor_datasets[1]
SCREAMING_SNAKE_CASE_ : str = TensorDataset(*lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = RandomSampler(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.train_batch_size )
SCREAMING_SNAKE_CASE_ : Dict = TensorDataset(*lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = SequentialSampler(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = DataLoader(lowerCAmelCase_ , sampler=lowerCAmelCase_ , batch_size=args.eval_batch_size )
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
SCREAMING_SNAKE_CASE_ : Dict = args.max_steps
SCREAMING_SNAKE_CASE_ : str = args.max_steps // (len(lowerCAmelCase_ ) // args.gradient_accumulation_steps) + 1
else:
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCAmelCase_ ) // args.gradient_accumulation_steps * args.num_train_epochs
SCREAMING_SNAKE_CASE_ : int = list(model.named_parameters() )
SCREAMING_SNAKE_CASE_ : Optional[int] = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
SCREAMING_SNAKE_CASE_ : List[Any] = [
{
'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay )],
'weight_decay': args.weight_decay,
},
{'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay )], 'weight_decay': 0.0},
]
SCREAMING_SNAKE_CASE_ : int = AdamW(lowerCAmelCase_ , lr=args.learning_rate , eps=args.adam_epsilon )
SCREAMING_SNAKE_CASE_ : Optional[Any] = get_linear_schedule_with_warmup(
lowerCAmelCase_ , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCAmelCase_ )
if args.do_train:
nb_tr_steps , tr_loss , exp_average_loss = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs ) , desc="Epoch" ):
tr_loss = 0
nb_tr_steps = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = tqdm(lowerCAmelCase_ , desc="Training" )
for step, batch in enumerate(lowerCAmelCase_ ):
SCREAMING_SNAKE_CASE_ : Tuple = tuple(t.to(lowerCAmelCase_ ) for t in batch )
SCREAMING_SNAKE_CASE_ : Dict = batch
SCREAMING_SNAKE_CASE_ : int = model(lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : List[str] = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
SCREAMING_SNAKE_CASE_ : str = 'Training loss: {:.2e} lr: {:.2e}'.format(lowerCAmelCase_ , scheduler.get_lr()[0] )
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
SCREAMING_SNAKE_CASE_ : Any = model.module if hasattr(lowerCAmelCase_ , "module" ) else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
SCREAMING_SNAKE_CASE_ : Optional[int] = os.path.join(args.output_dir , lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Any = os.path.join(args.output_dir , lowerCAmelCase_ )
torch.save(model_to_save.state_dict() , lowerCAmelCase_ )
model_to_save.config.to_json_file(lowerCAmelCase_ )
tokenizer.save_vocabulary(args.output_dir )
# Load a trained model and vocabulary that you have fine-tuned
SCREAMING_SNAKE_CASE_ : int = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir )
SCREAMING_SNAKE_CASE_ : List[Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir )
model.to(lowerCAmelCase_ )
if args.do_eval:
model.eval()
eval_loss , eval_accuracy = 0, 0
nb_eval_steps , nb_eval_examples = 0, 0
for batch in tqdm(lowerCAmelCase_ , desc="Evaluating" ):
SCREAMING_SNAKE_CASE_ : List[Any] = tuple(t.to(lowerCAmelCase_ ) for t in batch )
SCREAMING_SNAKE_CASE_ : str = batch
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Dict = model(
lowerCAmelCase_ , mc_token_ids=lowerCAmelCase_ , lm_labels=lowerCAmelCase_ , mc_labels=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ : Tuple = mc_logits.detach().cpu().numpy()
SCREAMING_SNAKE_CASE_ : List[Any] = mc_labels.to("cpu" ).numpy()
tmp_eval_accuracy = accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0 )
nb_eval_steps += 1
eval_loss = eval_loss / nb_eval_steps
eval_accuracy = eval_accuracy / nb_eval_examples
train_loss = tr_loss / nb_tr_steps if args.do_train else None
result = {'eval_loss': eval_loss, 'eval_accuracy': eval_accuracy, 'train_loss': train_loss}
SCREAMING_SNAKE_CASE_ : str = os.path.join(args.output_dir , "eval_results.txt" )
with open(lowerCAmelCase_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , lowerCAmelCase_ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
if __name__ == "__main__":
main()
| 18 |
'''simple docstring'''
__lowerCAmelCase = {
'''A''': ['''B''', '''C''', '''E'''],
'''B''': ['''A''', '''D''', '''E'''],
'''C''': ['''A''', '''F''', '''G'''],
'''D''': ['''B'''],
'''E''': ['''A''', '''B''', '''D'''],
'''F''': ['''C'''],
'''G''': ['''C'''],
}
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> list[str]:
_a : List[Any] = set()
# keep track of all the paths to be checked
_a : Any = [[start]]
# return path if start is goal
if start == goal:
return [start]
# keeps looping until all possible paths have been checked
while queue:
# pop the first path from the queue
_a : Tuple = queue.pop(0 )
# get the last node from the path
_a : Tuple = path[-1]
if node not in explored:
_a : Optional[Any] = graph[node]
# go through all neighbour nodes, construct a new path and
# push it into the queue
for neighbour in neighbours:
_a : Any = list(lowerCAmelCase_ )
new_path.append(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
# return path if neighbour is goal
if neighbour == goal:
return new_path
# mark node as explored
explored.add(lowerCAmelCase_ )
# in case there's no path between the 2 nodes
return []
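# Editor's note: because the queue is FIFO, paths are expanded in order of
# length, so the first path that reaches `goal` is guaranteed to be a shortest
# one in this unweighted graph.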
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ) -> int:
if not graph or start not in graph or target not in graph:
return -1
if start == target:
return 0
_a : Optional[int] = [start]
_a : Dict = set(lowerCAmelCase_ )
# Keep tab on distances from `start` node.
_a : Dict = {start: 0, target: -1}
while queue:
_a : List[str] = queue.pop(0 )
if node == target:
_a : Any = (
dist[node] if dist[target] == -1 else min(dist[target] , dist[node] )
)
for adjacent in graph[node]:
if adjacent not in visited:
visited.add(lowerCAmelCase_ )
queue.append(lowerCAmelCase_ )
_a : Any = dist[node] + 1
return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
| 89 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm import create_model
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import BitConfig, BitForImageClassification, BitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
lowercase__ : Union[str, Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
lowerCAmelCase = 'huggingface/label-files'
lowerCAmelCase = 'imagenet-1k-id2label.json'
lowerCAmelCase = json.load(open(hf_hub_download(lowerCAmelCase_ , lowerCAmelCase_ , repo_type='''dataset''' ) , '''r''' ) )
lowerCAmelCase = {int(lowerCAmelCase_ ): v for k, v in idalabel.items()}
lowerCAmelCase = {v: k for k, v in idalabel.items()}
lowerCAmelCase = 'std_conv' if 'bit' in model_name else False
# note that when using BiT as backbone for ViT-hybrid checkpoints,
# one needs to additionally set config.layer_type = "bottleneck", config.stem_type = "same",
# config.conv_layer = "std_conv_same"
lowerCAmelCase = BitConfig(
conv_layer=lowerCAmelCase_ , num_labels=1_0_0_0 , idalabel=lowerCAmelCase_ , labelaid=lowerCAmelCase_ , )
return config
def SCREAMING_SNAKE_CASE_ ( snake_case__ ) -> Optional[Any]:
if "stem.conv" in name:
lowerCAmelCase = name.replace('''stem.conv''' , '''bit.embedder.convolution''' )
if "blocks" in name:
lowerCAmelCase = name.replace('''blocks''' , '''layers''' )
if "head.fc" in name:
lowerCAmelCase = name.replace('''head.fc''' , '''classifier.1''' )
if name.startswith('''norm''' ):
lowerCAmelCase = 'bit.' + name
if "bit" not in name and "classifier" not in name:
lowerCAmelCase = 'bit.encoder.' + name
return name
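# Editor's example of the renaming above (hypothetical keys):
#   "stem.conv.weight"               -> "bit.embedder.convolution.weight"
#   "stages.0.blocks.0.conv1.weight" -> "bit.encoder.stages.0.layers.0.conv1.weight"
#   "head.fc.weight"                 -> "classifier.1.weight"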
def SCREAMING_SNAKE_CASE_ ( ) -> str:
lowerCAmelCase = 'http://images.cocodataset.org/val2017/000000039769.jpg'
lowerCAmelCase = Image.open(requests.get(lowerCAmelCase_ , stream=lowerCAmelCase_ ).raw )
return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE_ ( snake_case__ , snake_case__ , snake_case__=False ) -> Optional[int]:
lowerCAmelCase = get_config(lowerCAmelCase_ )
# load original model from timm
lowerCAmelCase = create_model(lowerCAmelCase_ , pretrained=lowerCAmelCase_ )
timm_model.eval()
# load state_dict of original model
lowerCAmelCase = timm_model.state_dict()
for key in state_dict.copy().keys():
lowerCAmelCase = state_dict.pop(lowerCAmelCase_ )
lowerCAmelCase = val.squeeze() if 'head' in key else val
# load HuggingFace model
lowerCAmelCase = BitForImageClassification(lowerCAmelCase_ )
model.eval()
model.load_state_dict(lowerCAmelCase_ )
# create image processor
lowerCAmelCase = create_transform(**resolve_data_config({} , model=lowerCAmelCase_ ) )
lowerCAmelCase = transform.transforms
lowerCAmelCase = {
'bilinear': PILImageResampling.BILINEAR,
'bicubic': PILImageResampling.BICUBIC,
'nearest': PILImageResampling.NEAREST,
}
lowerCAmelCase = BitImageProcessor(
do_resize=lowerCAmelCase_ , size={'''shortest_edge''': timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=lowerCAmelCase_ , crop_size={'''height''': timm_transforms[1].size[0], '''width''': timm_transforms[1].size[1]} , do_normalize=lowerCAmelCase_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
lowerCAmelCase = prepare_img()
lowerCAmelCase = transform(lowerCAmelCase_ ).unsqueeze(0 )
lowerCAmelCase = processor(lowerCAmelCase_ , return_tensors='''pt''' ).pixel_values
# verify pixel values
assert torch.allclose(lowerCAmelCase_ , lowerCAmelCase_ )
# verify logits
with torch.no_grad():
lowerCAmelCase = model(lowerCAmelCase_ )
lowerCAmelCase = outputs.logits
print('''Logits:''' , logits[0, :3] )
print('''Predicted class:''' , model.config.idalabel[logits.argmax(-1 ).item()] )
lowerCAmelCase = timm_model(lowerCAmelCase_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(lowerCAmelCase_ , outputs.logits , atol=1E-3 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
print(f"Saving model {model_name} and processor to {pytorch_dump_folder_path}" )
model.save_pretrained(lowerCAmelCase_ )
processor.save_pretrained(lowerCAmelCase_ )
if push_to_hub:
print(f"Pushing model {model_name} and processor to the hub" )
model.push_to_hub(f"ybelkada/{model_name}" )
processor.push_to_hub(f"ybelkada/{model_name}" )
if __name__ == "__main__":
lowercase__ : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default='''resnetv2_50x1_bitm''',
type=str,
help='''Name of the BiT timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
help='''Whether to push the model to the hub.''',
)
lowercase__ : List[str] = parser.parse_args()
convert_bit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 338 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
__lowerCAmelCase = {'''configuration_swin''': ['''SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''SwinConfig''', '''SwinOnnxConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''SwinForImageClassification''',
'''SwinForMaskedImageModeling''',
'''SwinModel''',
'''SwinPreTrainedModel''',
'''SwinBackbone''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFSwinForImageClassification''',
'''TFSwinForMaskedImageModeling''',
'''TFSwinModel''',
'''TFSwinPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_swin import SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP, SwinConfig, SwinOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swin import (
SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
SwinBackbone,
SwinForImageClassification,
SwinForMaskedImageModeling,
SwinModel,
SwinPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_swin import (
TF_SWIN_PRETRAINED_MODEL_ARCHIVE_LIST,
TFSwinForImageClassification,
TFSwinForMaskedImageModeling,
TFSwinModel,
TFSwinPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
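# Editor's note: the _LazyModule indirection above defers importing the heavy
# torch/TF modeling files until one of their attributes is first touched, e.g.
# (assuming the usual transformers layout):
#   from transformers.models.swin import SwinConfig   # cheap, config only
#   from transformers.models.swin import SwinModel    # triggers the torch import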
| 89 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(_UpperCamelCase ) , 'Tatoeba directory does not exist.' )
class A__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self: str) -> int:
"""simple docstring"""
__lowerCAmelCase : Optional[Any] = tempfile.mkdtemp()
return TatoebaConverter(save_dir=_UpperCAmelCase)
@slow
def _SCREAMING_SNAKE_CASE ( self: List[Any]) -> Any:
"""simple docstring"""
self.resolver.convert_models(["heb-eng"])
@slow
def _SCREAMING_SNAKE_CASE ( self: Optional[int]) -> List[Any]:
"""simple docstring"""
__lowerCAmelCase : str = self.resolver.write_model_card("opus-mt-he-en" , dry_run=_UpperCAmelCase)
assert mmeta["long_pair"] == "heb-eng" | 269 |
'''simple docstring'''
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : Optional[int] = BarthezTokenizer
lowerCAmelCase : int = BarthezTokenizerFast
lowerCAmelCase : Dict = True
lowerCAmelCase : str = True
def __lowercase ( self : List[Any] ):
super().setUp()
_a : List[Any] = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname ,legacy_format=_UpperCAmelCase )
_a : Union[str, Any] = tokenizer
def __lowercase ( self : Tuple ):
_a : Optional[Any] = '<pad>'
_a : List[Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_UpperCAmelCase ) ,_UpperCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_UpperCAmelCase ) ,_UpperCAmelCase )
def __lowercase ( self : str ):
_a : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] ,'<s>' )
self.assertEqual(vocab_keys[1] ,'<pad>' )
self.assertEqual(vocab_keys[-1] ,'<mask>' )
self.assertEqual(len(_UpperCAmelCase ) ,101122 )
def __lowercase ( self : Dict ):
self.assertEqual(self.get_tokenizer().vocab_size ,101122 )
@require_torch
def __lowercase ( self : Dict ):
_a : Any = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_a : Dict = [0, 57, 3018, 70307, 91, 2]
_a : Dict = self.tokenizer(
_UpperCAmelCase ,max_length=len(_UpperCAmelCase ) ,padding=_UpperCAmelCase ,truncation=_UpperCAmelCase ,return_tensors='pt' )
self.assertIsInstance(_UpperCAmelCase ,_UpperCAmelCase )
self.assertEqual((2, 6) ,batch.input_ids.shape )
self.assertEqual((2, 6) ,batch.attention_mask.shape )
_a : Tuple = batch.input_ids.tolist()[0]
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
if not self.test_rust_tokenizer:
return
_a : str = self.get_tokenizer()
_a : List[str] = self.get_rust_tokenizer()
_a : Dict = 'I was born in 92000, and this is falsé.'
_a : List[Any] = tokenizer.tokenize(_UpperCAmelCase )
_a : Tuple = rust_tokenizer.tokenize(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Optional[Any] = tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase ,add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
_a : Union[str, Any] = self.get_rust_tokenizer()
_a : Any = tokenizer.encode(_UpperCAmelCase )
_a : Optional[int] = rust_tokenizer.encode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
@slow
def __lowercase ( self : Optional[int] ):
# fmt: off
_a : Optional[int] = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_a : Optional[Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_UpperCAmelCase ,model_name='moussaKam/mbarthez' ,revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' ,sequences=_UpperCAmelCase ,)
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : str = {
"""configuration_nllb_moe""": [
"""NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""NllbMoeConfig""",
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
"""NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""NllbMoeForConditionalGeneration""",
"""NllbMoeModel""",
"""NllbMoePreTrainedModel""",
"""NllbMoeTop2Router""",
"""NllbMoeSparseMLP""",
]
if TYPE_CHECKING:
from .configuration_nllb_moe import (
NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
NllbMoeConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_nllb_moe import (
NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
NllbMoeForConditionalGeneration,
NllbMoeModel,
NllbMoePreTrainedModel,
NllbMoeSparseMLP,
NllbMoeTopaRouter,
)
else:
import sys
__lowerCamelCase : Optional[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 52 |
'''simple docstring'''
import subprocess
import sys
from transformers import BertConfig, BertModel, BertTokenizer, pipeline
from transformers.testing_utils import TestCasePlus, require_torch
class __magic_name__ ( _UpperCamelCase ):
@require_torch
def __lowercase ( self : Tuple ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Optional[int] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : List[str] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Tuple = '\nimport socket\ndef offline_socket(*args, **kwargs): raise RuntimeError("Offline mode is enabled, we shouldn\'t access internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : List[Any] = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : Tuple = self.get_env()
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : List[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : Any ):
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Dict = '\nfrom transformers import BertConfig, BertModel, BertTokenizer, pipeline\n '
_a : Optional[int] = '\nmname = "hf-internal-testing/tiny-random-bert"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nBertTokenizer.from_pretrained(mname)\npipe = pipeline(task="fill-mask", model=mname)\nprint("success")\n '
_a : Optional[Any] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Faking flaky internet")\nsocket.socket = offline_socket\n '
# Force fetching the files so that we can use the cache
_a : int = 'hf-internal-testing/tiny-random-bert'
BertConfig.from_pretrained(_UpperCAmelCase )
BertModel.from_pretrained(_UpperCAmelCase )
BertTokenizer.from_pretrained(_UpperCAmelCase )
pipeline(task='fill-mask' ,model=_UpperCAmelCase )
# baseline - just load from_pretrained with normal network
_a : Optional[int] = [sys.executable, '-c', '\n'.join([load, run, mock] )]
# should succeed
_a : str = self.get_env()
_a : Optional[Any] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : List[str] ):
# this test is a bit tricky since TRANSFORMERS_OFFLINE can only be changed before
# `transformers` is loaded, and it's too late for inside pytest - so we are changing it
# while running an external program
# python one-liner segments
# this must be loaded before socket.socket is monkey-patched
_a : Union[str, Any] = '\nfrom transformers import BertConfig, BertModel, BertTokenizer\n '
_a : Optional[Any] = '\nmname = "hf-internal-testing/tiny-random-bert-sharded"\nBertConfig.from_pretrained(mname)\nBertModel.from_pretrained(mname)\nprint("success")\n '
_a : str = '\nimport socket\ndef offline_socket(*args, **kwargs): raise ValueError("Offline mode is enabled")\nsocket.socket = offline_socket\n '
# baseline - just load from_pretrained with normal network
_a : Optional[Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Dict = self.get_env()
_a : int = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# next emulate no network
_a : List[Any] = [sys.executable, '-c', '\n'.join([load, mock, run] )]
# Doesn't fail anymore since the model is in the cache due to other tests, so commenting this.
# env["TRANSFORMERS_OFFLINE"] = "0"
# result = subprocess.run(cmd, env=env, check=False, capture_output=True)
# self.assertEqual(result.returncode, 1, result.stderr)
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : int = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
@require_torch
def __lowercase ( self : int ):
_a : Optional[Any] = '\nfrom transformers import pipeline\n '
_a : str = '\nmname = "hf-internal-testing/tiny-random-bert"\npipe = pipeline(model=mname)\n '
_a : List[str] = '\nimport socket\ndef offline_socket(*args, **kwargs): raise socket.error("Offline mode is enabled")\nsocket.socket = offline_socket\n '
_a : List[Any] = self.get_env()
_a : Dict = '1'
_a : Dict = [sys.executable, '-c', '\n'.join([load, mock, run] )]
_a : str = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,1 ,result.stderr )
self.assertIn(
'You cannot infer task automatically within `pipeline` when using offline mode' ,result.stderr.decode().replace('\n' ,'' ) ,)
@require_torch
def __lowercase ( self : int ):
_a : Optional[int] = '\nfrom transformers import AutoModel\n '
_a : List[Any] = '\nmname = "hf-internal-testing/test_dynamic_model"\nAutoModel.from_pretrained(mname, trust_remote_code=True)\nprint("success")\n '
# baseline - just load from_pretrained with normal network
_a : Union[str, Any] = [sys.executable, '-c', '\n'.join([load, run] )]
# should succeed
_a : Tuple = self.get_env()
_a : List[str] = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
# should succeed as TRANSFORMERS_OFFLINE=1 tells it to use local files
_a : Optional[Any] = '1'
_a : Any = subprocess.run(_UpperCAmelCase ,env=_UpperCAmelCase ,check=_UpperCAmelCase ,capture_output=_UpperCAmelCase )
self.assertEqual(result.returncode ,0 ,result.stderr )
self.assertIn('success' ,result.stdout.decode() )
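# Editor's sketch of the idiom the mock strings above inject (standalone):
#
#   import socket
#   def offline_socket(*args, **kwargs):
#       raise RuntimeError("network disabled for this process")
#   socket.socket = offline_socket
#
# Patching socket.socket before transformers is imported makes any network
# call in the child process fail fast, which is what each test asserts on.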
| 89 | 0 |
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
__lowerCamelCase : str = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
__lowerCamelCase : Optional[int] = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account[citation needed].
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
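# Editor's note: the headline score combines the modified n-gram precisions
# p_n (uniform weights w_n = 1/N) with a brevity penalty:
#   BLEU = BP * exp(sum_n w_n * log p_n),  BP = 1 if c > r else exp(1 - r/c),
# where c and r are the candidate and reference corpus lengths.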
__lowerCamelCase : Dict = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __snake_case ( datasets.Metric ):
def __a ( self : str ):
"""simple docstring"""
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
def __a ( self : Optional[int] , _lowercase : Any , _lowercase : Dict , _lowercase : List[Any]=4 , _lowercase : Optional[Any]=False ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = compute_bleu(
reference_corpus=_UpperCAmelCase , translation_corpus=_UpperCAmelCase , max_order=_UpperCAmelCase , smooth=_UpperCAmelCase )
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 219 |
'''simple docstring'''
def __lowerCamelCase ( ) -> Tuple:
for n in range(1 , 1000000 ):
yield n * (n + 1) // 2
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[Any]:
_a : Any = 1
_a : Tuple = 2
while i * i <= n:
_a : Tuple = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
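# Editor's worked example: 28 = 2**2 * 7, so the loop above accumulates
# (2 + 1) * (1 + 1) = 6 — matching the divisors 1, 2, 4, 7, 14, 28. 28 is also
# the first triangle number with more than five divisors.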
def __lowerCamelCase ( ) -> str:
return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase_ ) > 500 )
if __name__ == "__main__":
print(solution())
| 89 | 0 |
"""simple docstring"""
import numpy as np
from scipy.spatial.distance import cdist
from sklearn.metrics import fa_score
import datasets
_UpperCamelCase: int = '\\n @inproceedings{kakwani2020indicnlpsuite,\n title={{IndicNLPSuite: Monolingual Corpora, Evaluation Benchmarks and Pre-trained Multilingual Language Models for Indian Languages}},\n author={Divyanshu Kakwani and Anoop Kunchukuttan and Satish Golla and Gokul N.C. and Avik Bhattacharyya and Mitesh M. Khapra and Pratyush Kumar},\n year={2020},\n booktitle={Findings of EMNLP},\n}\n'
_UpperCamelCase: Optional[int] = '\\n IndicGLUE is a natural language understanding benchmark for Indian languages. It contains a wide\n variety of tasks and covers 11 major Indian languages - as, bn, gu, hi, kn, ml, mr, or, pa, ta, te.\n'
_UpperCamelCase: Tuple = '\nCompute IndicGLUE evaluation metric associated to each IndicGLUE dataset.\nArgs:\n predictions: list of predictions to score (as int64),\n except for \'cvit-mkb-clsr\' where each prediction is a vector (of float32).\n references: list of ground truth labels corresponding to the predictions (as int64),\n except for \'cvit-mkb-clsr\' where each reference is a vector (of float32).\nReturns: depending on the IndicGLUE subset, one or several of:\n "accuracy": Accuracy\n "f1": F1 score\n "precision": Precision@10\nExamples:\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wnli\') # \'wnli\' or any of ["copa", "sna", "csqa", "wstp", "inltkh", "bbca", "iitp-mr", "iitp-pr", "actsa-sc", "md"]\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'wiki-ner\')\n >>> references = [0, 1]\n >>> predictions = [0, 1]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'accuracy\': 1.0, \'f1\': 1.0}\n\n >>> indic_glue_metric = datasets.load_metric(\'indic_glue\', \'cvit-mkb-clsr\')\n >>> references = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> predictions = [[0.5, 0.5, 0.5], [0.1, 0.2, 0.3]]\n >>> results = indic_glue_metric.compute(predictions=predictions, references=references)\n >>> print(results)\n {\'precision@10\': 1.0}\n\n'
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
return float((preds == labels).mean() )
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> str:
'''simple docstring'''
acc = simple_accuracy(lowerCAmelCase_ , lowerCAmelCase_ )
fa = float(fa_score(y_true=lowerCAmelCase_ , y_pred=lowerCAmelCase_ ) )
return {
"accuracy": acc,
"f1": fa,
}
def lowercase__ ( _UpperCAmelCase , _UpperCAmelCase ) -> List[str]:
'''simple docstring'''
lowercase : List[Any] = np.array(lowerCAmelCase_ )
lowercase : Optional[Any] = np.array(lowerCAmelCase_ )
lowercase : Optional[Any] = en_sentvecs.shape[0]
# mean centering
lowercase : Dict = en_sentvecs - np.mean(lowerCAmelCase_ , axis=0 )
lowercase : Optional[Any] = in_sentvecs - np.mean(lowerCAmelCase_ , axis=0 )
lowercase : str = cdist(lowerCAmelCase_ , lowerCAmelCase_ , 'cosine' )
lowercase : str = np.array(range(lowerCAmelCase_ ) )
lowercase : Any = sim.argsort(axis=1 )[:, :10]
lowercase : Optional[int] = np.any(preds == actual[:, None] , axis=1 )
return float(matches.mean() )
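# Editor's note: precision@10 above is retrieval accuracy — after mean
# centering, each English sentence vector is ranked against all Indic vectors
# by cosine distance, and a hit is counted when the true (diagonal) index
# appears among the 10 nearest neighbours.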
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class a__ ( datasets.Metric ):
def lowercase ( self : Optional[int] ) -> List[Any]:
if self.config_name not in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"cvit-mkb-clsr",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
"wiki-ner",
]:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
return datasets.MetricInfo(
description=_DESCRIPTION, citation=_CITATION, inputs_description=_KWARGS_DESCRIPTION, features=datasets.Features(
{
'predictions': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
'references': datasets.Value('int64' )
if self.config_name != 'cvit-mkb-clsr'
else datasets.Sequence(datasets.Value('float32' ) ),
} ), codebase_urls=[], reference_urls=[], format='numpy' if self.config_name != 'cvit-mkb-clsr' else None, )
def lowercase ( self : Union[str, Any], lowerCAmelCase : List[str], lowerCAmelCase : str ) -> Optional[Any]:
if self.config_name == "cvit-mkb-clsr":
return {"precision@10": precision_at_aa(_UpperCAmelCase, _UpperCAmelCase )}
elif self.config_name in ["wiki-ner"]:
return acc_and_fa(_UpperCAmelCase, _UpperCAmelCase )
elif self.config_name in [
"wnli",
"copa",
"sna",
"csqa",
"wstp",
"inltkh",
"bbca",
"iitp-mr",
"iitp-pr",
"actsa-sc",
"md",
]:
return {"accuracy": simple_accuracy(_UpperCAmelCase, _UpperCAmelCase )}
else:
raise KeyError(
'You should supply a configuration name selected in '
'["wnli", "copa", "sna", "csqa", "wstp", "inltkh", "bbca", '
'"cvit-mkb-clsr", "iitp-mr", "iitp-pr", "actsa-sc", "md", '
'"wiki-ner"]' )
| 255 |
'''simple docstring'''
import multiprocessing
from typing import TYPE_CHECKING, Optional, Union
from .. import Dataset, Features, config
from ..formatting import query_table
from ..packaged_modules.sql.sql import Sql
from ..utils import logging
from .abc import AbstractDatasetInputStream
if TYPE_CHECKING:
import sqlitea
import sqlalchemy
class __magic_name__ ( _UpperCamelCase ):
def __init__( self : Optional[int] ,_UpperCAmelCase : Union[str, "sqlalchemy.sql.Selectable"] ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[Features] = None ,_UpperCAmelCase : str = None ,_UpperCAmelCase : bool = False ,**_UpperCAmelCase : Dict ,):
super().__init__(features=_UpperCAmelCase ,cache_dir=_UpperCAmelCase ,keep_in_memory=_UpperCAmelCase ,**_UpperCAmelCase )
_a : Tuple = Sql(
cache_dir=_UpperCAmelCase ,features=_UpperCAmelCase ,sql=_UpperCAmelCase ,con=_UpperCAmelCase ,**_UpperCAmelCase ,)
def __lowercase ( self : Dict ):
_a : Optional[Any] = None
_a : Dict = None
_a : Dict = None
_a : Optional[int] = None
self.builder.download_and_prepare(
download_config=_UpperCAmelCase ,download_mode=_UpperCAmelCase ,verification_mode=_UpperCAmelCase ,base_path=_UpperCAmelCase ,)
# Build dataset for splits
_a : List[str] = self.builder.as_dataset(
split='train' ,verification_mode=_UpperCAmelCase ,in_memory=self.keep_in_memory )
return dataset
class __magic_name__ :
def __init__( self : Optional[int] ,_UpperCAmelCase : Dataset ,_UpperCAmelCase : str ,_UpperCAmelCase : Union[str, "sqlalchemy.engine.Connection", "sqlalchemy.engine.Engine", "sqlite3.Connection"] ,_UpperCAmelCase : Optional[int] = None ,_UpperCAmelCase : Optional[int] = None ,**_UpperCAmelCase : Dict ,):
if num_proc is not None and num_proc <= 0:
raise ValueError(F"""num_proc {num_proc} must be an integer > 0.""" )
_a : Dict = dataset
_a : List[Any] = name
_a : Tuple = con
_a : Union[str, Any] = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
_a : List[Any] = num_proc
_a : Tuple = to_sql_kwargs
def __lowercase ( self : List[Any] ):
_a : Tuple = self.to_sql_kwargs.pop('sql' ,_UpperCAmelCase )
_a : str = self.to_sql_kwargs.pop('con' ,_UpperCAmelCase )
_a : Optional[Any] = self.to_sql_kwargs.pop('index' ,_UpperCAmelCase )
_a : Any = self._write(index=_UpperCAmelCase ,**self.to_sql_kwargs )
return written
def __lowercase ( self : Optional[int] ,_UpperCAmelCase : Dict ):
offset , index , to_sql_kwargs = args
_a : Tuple = {**to_sql_kwargs, 'if_exists': 'append'} if offset > 0 else to_sql_kwargs
_a : Dict = query_table(
table=self.dataset.data ,key=slice(_UpperCAmelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
_a : Tuple = batch.to_pandas()
_a : Dict = df.to_sql(self.name ,self.con ,index=_UpperCAmelCase ,**_UpperCAmelCase )
return num_rows or len(_UpperCAmelCase )
def __lowercase ( self : int ,_UpperCAmelCase : Optional[int] ,**_UpperCAmelCase : List[Any] ):
_a : Union[str, Any] = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += self._batch_sql((offset, index, to_sql_kwargs) )
else:
num_rows , batch_size = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for num_rows in logging.tqdm(
pool.imap(
self._batch_sql ,[(offset, index, to_sql_kwargs) for offset in range(0 ,_UpperCAmelCase ,_UpperCAmelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit='ba' ,disable=not logging.is_progress_bar_enabled() ,desc='Creating SQL from Arrow format' ,):
written += num_rows
return written
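# A minimal usage sketch of the reader/writer pair above (not part of the
# original module); the in-memory database and table name are illustrative.
#
#   import sqlite3
#   from datasets import Dataset
#
#   ds = Dataset.from_dict({"id": [1, 2, 3], "text": ["a", "b", "c"]})
#   con = sqlite3.connect(":memory:")
#   SqlDatasetWriter(ds, "demo_table", con, batch_size=2).write()
#   round_tripped = SqlDatasetReader("SELECT * FROM demo_table", con).read()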
| 89 | 0 |
'''simple docstring'''
import argparse
import json
import re
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
    MobileNetV1Config,
    MobileNetV1ForImageClassification,
    MobileNetV1ImageProcessor,
    load_tf_weights_in_mobilenet_v1,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
def get_mobilenet_v1_config(model_name):
    config = MobileNetV1Config(layer_norm_eps=0.001)

    if "_quant" in model_name:
        raise ValueError("Quantized models are not supported.")

    matches = re.match(r"^mobilenet_v1_([^_]*)_([^_]*)$", model_name)
    if matches:
        config.depth_multiplier = float(matches[1])
        config.image_size = int(matches[2])

    # The TensorFlow version of MobileNetV1 predicts 1001 classes instead of
    # the usual 1000. The first class (index 0) is "background".
    config.num_labels = 1001
    filename = "imagenet-1k-id2label.json"
    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k) + 1: v for k, v in id2label.items()}
    id2label[0] = "background"
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config


def prepare_img():
    # We will verify our results on a sample COCO image
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(model_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    config = get_mobilenet_v1_config(model_name)

    # Load 🤗 model
    model = MobileNetV1ForImageClassification(config).eval()

    # Load weights from TensorFlow checkpoint
    load_tf_weights_in_mobilenet_v1(model, config, checkpoint_path)

    # Check outputs on an image, prepared by MobileNetV1ImageProcessor
    image_processor = MobileNetV1ImageProcessor(
        crop_size={"width": config.image_size, "height": config.image_size},
        size={"shortest_edge": config.image_size + 32},
    )
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits

    assert logits.shape == (1, 1001)

    if model_name == "mobilenet_v1_1.0_224":
        expected_logits = torch.tensor([-4.1739, -1.1233, 3.1205])
    elif model_name == "mobilenet_v1_0.75_192":
        expected_logits = torch.tensor([-3.9440, -2.3141, -0.3333])
    else:
        expected_logits = None

    if expected_logits is not None:
        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        print("Pushing to the hub...")
        repo_id = "google/" + model_name
        image_processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="mobilenet_v1_1.0_224",
type=str,
help="Name of the MobileNetV1 model you\'d like to convert. Should in the form \'mobilenet_v1_<depth>_<size>\'.",
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original TensorFlow checkpoint (.ckpt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
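# A hypothetical invocation of this conversion script; the script filename and
# the checkpoint path are placeholders and must point at real files:
#
#   python convert_original_tf_checkpoint_to_pytorch.py \
#       --model_name mobilenet_v1_1.0_224 \
#       --checkpoint_path ./mobilenet_v1_1.0_224.ckpt \
#       --pytorch_dump_folder_path ./mobilenet_v1_1.0_224_pt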
| 311 |
'''simple docstring'''
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(
    img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int
) -> np.ndarray:
    """Build the affine transform mapping pts1 onto pts2 and apply it to img."""
    rotation_matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, rotation_matrix, (rows, cols))


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / '''image_data''' / '''lena.jpg''')
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ['''Original''', '''Rotation 1''', '''Rotation 2''', '''Rotation 3''']
    for i, image in enumerate(images):
        plt.subplot(2, 2, i + 1), plt.imshow(image, '''gray''')
        plt.title(titles[i])
        plt.axis('''off''')
    plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
    plt.show()
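# A related sketch, assuming a plain rotation about the image centre is wanted
# instead of a general three-point affine map: cv2.getRotationMatrix2D builds
# the 2x3 matrix directly from an angle and a scale factor.
#
#   center = (img_cols / 2, img_rows / 2)
#   rot_mat = cv2.getRotationMatrix2D(center, 45, 1.0)  # 45 degrees, no scaling
#   rotated_45 = cv2.warpAffine(gray_img, rot_mat, (img_cols, img_rows))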
| 89 | 0 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...file_utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward
from ...modeling_outputs import (
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import logging
from .configuration_regnet import RegNetConfig
a_ = logging.get_logger(__name__)
# General docstring
a_ = 'RegNetConfig'
# Base docstring
a_ = 'facebook/regnet-y-040'
a_ = [1, 1_0_8_8, 7, 7]
# Image classification docstring
a_ = 'facebook/regnet-y-040'
a_ = 'tabby, tabby cat'
a_ = [
'facebook/regnet-y-040',
# See all regnet models at https://huggingface.co/models?filter=regnet
]
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Optional[Any] , __lowercase : int , __lowercase : int , __lowercase : int = 3 , __lowercase : int = 1 , __lowercase : int = 1 , __lowercase : Optional[str] = "relu" , ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] =nn.Convad(
_UpperCAmelCase , _UpperCAmelCase , kernel_size=_UpperCAmelCase , stride=_UpperCAmelCase , padding=kernel_size // 2 , groups=_UpperCAmelCase , bias=_UpperCAmelCase , )
SCREAMING_SNAKE_CASE__ : Any =nn.BatchNormad(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =ACTaFN[activation] if activation is not None else nn.Identity()
def __magic_name__ ( self : int , __lowercase : List[str] ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Dict =self.convolution(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] =self.normalization(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] =self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , __lowercase : RegNetConfig ) -> Optional[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ : List[Any] =RegNetConvLayer(
config.num_channels , config.embedding_size , kernel_size=3 , stride=2 , activation=config.hidden_act )
SCREAMING_SNAKE_CASE__ : List[str] =config.num_channels
def __magic_name__ ( self : str , __lowercase : Tuple ) -> str:
SCREAMING_SNAKE_CASE__ : Tuple =pixel_values.shape[1]
if num_channels != self.num_channels:
raise ValueError(
'''Make sure that the channel dimension of the pixel values match with the one set in the configuration.''' )
SCREAMING_SNAKE_CASE__ : Optional[int] =self.embedder(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] , __lowercase : int , __lowercase : int , __lowercase : int = 2 ) -> List[str]:
super().__init__()
SCREAMING_SNAKE_CASE__ : str =nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , stride=_UpperCAmelCase , bias=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[Any] =nn.BatchNormad(_UpperCAmelCase )
def __magic_name__ ( self : Dict , __lowercase : Tensor ) -> Any:
SCREAMING_SNAKE_CASE__ : int =self.convolution(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Any =self.normalization(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Optional[int] , __lowercase : int , __lowercase : int ) -> str:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] =nn.AdaptiveAvgPoolad((1, 1) )
SCREAMING_SNAKE_CASE__ : int =nn.Sequential(
nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.ReLU() , nn.Convad(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 ) , nn.Sigmoid() , )
def __magic_name__ ( self : Optional[Any] , __lowercase : Optional[int] ) -> Any:
# b c h w -> b c 1 1
SCREAMING_SNAKE_CASE__ : int =self.pooler(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =self.attention(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[str] =hidden_state * attention
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : List[Any] , __lowercase : RegNetConfig , __lowercase : int , __lowercase : int , __lowercase : int = 1 ) -> List[Any]:
super().__init__()
SCREAMING_SNAKE_CASE__ : str =in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE__ : int =max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE__ : Tuple =(
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE__ : Any =nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
SCREAMING_SNAKE_CASE__ : Any =ACTaFN[config.hidden_act]
def __magic_name__ ( self : List[Any] , __lowercase : str ) -> int:
SCREAMING_SNAKE_CASE__ : str =hidden_state
SCREAMING_SNAKE_CASE__ : str =self.layer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple =self.shortcut(_UpperCAmelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE__ : Optional[Any] =self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , __lowercase : RegNetConfig , __lowercase : int , __lowercase : int , __lowercase : int = 1 ) -> str:
super().__init__()
SCREAMING_SNAKE_CASE__ : Optional[int] =in_channels != out_channels or stride != 1
SCREAMING_SNAKE_CASE__ : str =max(1 , out_channels // config.groups_width )
SCREAMING_SNAKE_CASE__ : Dict =(
RegNetShortCut(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase ) if should_apply_shortcut else nn.Identity()
)
SCREAMING_SNAKE_CASE__ : int =nn.Sequential(
RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=config.hidden_act ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , groups=_UpperCAmelCase , activation=config.hidden_act ) , RegNetSELayer(_UpperCAmelCase , reduced_channels=int(round(in_channels / 4 ) ) ) , RegNetConvLayer(_UpperCAmelCase , _UpperCAmelCase , kernel_size=1 , activation=_UpperCAmelCase ) , )
SCREAMING_SNAKE_CASE__ : Optional[int] =ACTaFN[config.hidden_act]
def __magic_name__ ( self : Dict , __lowercase : Any ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ : List[str] =hidden_state
SCREAMING_SNAKE_CASE__ : Tuple =self.layer(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =self.shortcut(_UpperCAmelCase )
hidden_state += residual
SCREAMING_SNAKE_CASE__ : List[str] =self.activation(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : Any , __lowercase : RegNetConfig , __lowercase : int , __lowercase : int , __lowercase : int = 2 , __lowercase : int = 2 , ) -> Optional[int]:
super().__init__()
SCREAMING_SNAKE_CASE__ : Dict =RegNetXLayer if config.layer_type == 'x' else RegNetYLayer
SCREAMING_SNAKE_CASE__ : Any =nn.Sequential(
# downsampling is done in the first layer with stride of 2
layer(
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , stride=_UpperCAmelCase , ) , *[layer(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase ) for _ in range(depth - 1 )] , )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Optional[Any] ) -> str:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =self.layers(_UpperCAmelCase )
return hidden_state
class __SCREAMING_SNAKE_CASE ( nn.Module ):
def __init__( self : str , __lowercase : RegNetConfig ) -> int:
super().__init__()
SCREAMING_SNAKE_CASE__ : Dict =nn.ModuleList([] )
# based on `downsample_in_first_stage`, the first layer of the first stage may or may not downsample the input
self.stages.append(
RegNetStage(
_UpperCAmelCase , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
SCREAMING_SNAKE_CASE__ : int =zip(config.hidden_sizes , config.hidden_sizes[1:] )
for (in_channels, out_channels), depth in zip(_UpperCAmelCase , config.depths[1:] ):
self.stages.append(RegNetStage(_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , depth=_UpperCAmelCase ) )
def __magic_name__ ( self : Union[str, Any] , __lowercase : Tensor , __lowercase : bool = False , __lowercase : bool = True ) -> List[str]:
SCREAMING_SNAKE_CASE__ : Optional[int] =() if output_hidden_states else None
for stage_module in self.stages:
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : str =hidden_states + (hidden_state,)
SCREAMING_SNAKE_CASE__ : Any =stage_module(_UpperCAmelCase )
if output_hidden_states:
SCREAMING_SNAKE_CASE__ : str =hidden_states + (hidden_state,)
if not return_dict:
return tuple(v for v in [hidden_state, hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_UpperCAmelCase , hidden_states=_UpperCAmelCase )
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
snake_case_ = RegNetConfig
snake_case_ = 'regnet'
snake_case_ = 'pixel_values'
snake_case_ = True
def __magic_name__ ( self : Dict , __lowercase : Dict ) -> Any:
if isinstance(_UpperCAmelCase , nn.Convad ):
nn.init.kaiming_normal_(module.weight , mode='''fan_out''' , nonlinearity='''relu''' )
elif isinstance(_UpperCAmelCase , (nn.BatchNormad, nn.GroupNorm) ):
nn.init.constant_(module.weight , 1 )
nn.init.constant_(module.bias , 0 )
def __magic_name__ ( self : Union[str, Any] , __lowercase : List[str] , __lowercase : Dict=False ) -> Union[str, Any]:
if isinstance(_UpperCAmelCase , _UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ : Union[str, Any] =value
a_ = R'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`RegNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
a_ = R'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`ConvNextImageProcessor.__call__`] for details.\n\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
"""The bare RegNet model outputting raw features without any specific head on top.""" , _UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetModel with RESNET->REGNET,ResNet->RegNet
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
def __init__( self : Optional[Any] , __lowercase : Any ) -> List[Any]:
super().__init__(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] =config
SCREAMING_SNAKE_CASE__ : int =RegNetEmbeddings(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str =RegNetEncoder(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =nn.AdaptiveAvgPoolad((1, 1) )
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , modality='''vision''' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
def __magic_name__ ( self : List[str] , __lowercase : Tensor , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None ) -> Dict:
SCREAMING_SNAKE_CASE__ : Any =(
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
SCREAMING_SNAKE_CASE__ : Optional[int] =return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : List[str] =self.embedder(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Tuple =self.encoder(
_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] =encoder_outputs[0]
SCREAMING_SNAKE_CASE__ : int =self.pooler(_UpperCAmelCase )
if not return_dict:
return (last_hidden_state, pooled_output) + encoder_outputs[1:]
return BaseModelOutputWithPoolingAndNoAttention(
last_hidden_state=_UpperCAmelCase , pooler_output=_UpperCAmelCase , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
"""\n RegNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n ImageNet.\n """ , _UpperCamelCase , )
# Copied from transformers.models.resnet.modeling_resnet.ResNetForImageClassification with RESNET->REGNET,ResNet->RegNet,resnet->regnet
class __SCREAMING_SNAKE_CASE ( _UpperCamelCase ):
def __init__( self : Union[str, Any] , __lowercase : Dict ) -> List[str]:
super().__init__(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : List[Any] =config.num_labels
SCREAMING_SNAKE_CASE__ : Any =RegNetModel(_UpperCAmelCase )
# classification head
SCREAMING_SNAKE_CASE__ : Union[str, Any] =nn.Sequential(
nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
# initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_UpperCAmelCase )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=_UpperCAmelCase , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
def __magic_name__ ( self : Dict , __lowercase : Optional[torch.FloatTensor] = None , __lowercase : Optional[torch.LongTensor] = None , __lowercase : Optional[bool] = None , __lowercase : Optional[bool] = None , ) -> str:
SCREAMING_SNAKE_CASE__ : Any =return_dict if return_dict is not None else self.config.use_return_dict
SCREAMING_SNAKE_CASE__ : Optional[int] =self.regnet(_UpperCAmelCase , output_hidden_states=_UpperCAmelCase , return_dict=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Optional[int] =outputs.pooler_output if return_dict else outputs[1]
SCREAMING_SNAKE_CASE__ : List[Any] =self.classifier(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : Dict =None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ : Tuple ='regression'
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
SCREAMING_SNAKE_CASE__ : Any ='single_label_classification'
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] ='multi_label_classification'
if self.config.problem_type == "regression":
SCREAMING_SNAKE_CASE__ : List[str] =MSELoss()
if self.num_labels == 1:
SCREAMING_SNAKE_CASE__ : int =loss_fct(logits.squeeze() , labels.squeeze() )
else:
SCREAMING_SNAKE_CASE__ : Union[str, Any] =loss_fct(_UpperCAmelCase , _UpperCAmelCase )
elif self.config.problem_type == "single_label_classification":
SCREAMING_SNAKE_CASE__ : List[Any] =CrossEntropyLoss()
SCREAMING_SNAKE_CASE__ : Tuple =loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
SCREAMING_SNAKE_CASE__ : List[Any] =BCEWithLogitsLoss()
SCREAMING_SNAKE_CASE__ : int =loss_fct(_UpperCAmelCase , _UpperCAmelCase )
if not return_dict:
SCREAMING_SNAKE_CASE__ : int =(logits,) + outputs[2:]
return (loss,) + output if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_UpperCAmelCase , logits=_UpperCAmelCase , hidden_states=outputs.hidden_states ) | 152 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_bigbird_pegasus''': [
'''BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BigBirdPegasusConfig''',
'''BigBirdPegasusOnnxConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_bigbird_pegasus'''] = [
'''BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BigBirdPegasusForCausalLM''',
'''BigBirdPegasusForConditionalGeneration''',
'''BigBirdPegasusForQuestionAnswering''',
'''BigBirdPegasusForSequenceClassification''',
'''BigBirdPegasusModel''',
'''BigBirdPegasusPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 89 | 0 |
from .imports import is_tqdm_available
if is_tqdm_available():
from tqdm.auto import tqdm as _tqdm
from ..state import PartialState
def tqdm(main_process_only: bool = True, *args, **kwargs):
    if not is_tqdm_available():
        raise ImportError('Accelerate\'s `tqdm` module requires `tqdm` to be installed. Please run `pip install tqdm`.')
    disable = False
    if main_process_only:
        # show the progress bar only on the local main process
        disable = PartialState().local_process_index != 0
    return _tqdm(*args, **kwargs, disable=disable)
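# Minimal usage sketch, assuming the wrapper above: the first positional
# argument selects main-process-only display, the rest is forwarded to tqdm.
#
#   for step in tqdm(True, range(100)):
#       ...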
| 143 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
) -> None:
    """Save max(src_len, tgt_len) for each example to allow dynamic batching."""
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='train', **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch['input_ids'].ne(pad).sum(1).tolist()
            tgt_lens = batch['labels'].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path='val', **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
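# Hypothetical CLI invocation via python-fire (tokenizer name and data_dir are
# placeholders for a real seq2seq dataset layout):
#
#   python save_len_file.py --tokenizer_name facebook/bart-large \
#       --data_dir ./wmt_en_ro --max_source_length 1024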
| 89 | 0 |
from __future__ import annotations
COULOMBS_CONSTANT = 8.988e9  # units = N * m^2 * C^-2


def coulombs_law(force: float, charge1: float, charge2: float, distance: float) -> dict[str, float]:
    """
    Apply Coulomb's law F = k * |q1 * q2| / r^2 and solve for whichever of the
    four quantities is passed as 0 (exactly one argument must be 0).
    """
    charge_product = abs(charge1 * charge2)

    if (force, charge1, charge2, distance).count(0) != 1:
        raise ValueError('One and only one argument must be 0')
    if distance < 0:
        raise ValueError('Distance cannot be negative')
    if force == 0:
        force = COULOMBS_CONSTANT * charge_product / (distance**2)
        return {"force": force}
    elif charge1 == 0:
        charge1 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge2)
        return {"charge1": charge1}
    elif charge2 == 0:
        charge2 = abs(force) * (distance**2) / (COULOMBS_CONSTANT * charge1)
        return {"charge2": charge2}
    elif distance == 0:
        distance = (COULOMBS_CONSTANT * charge_product / abs(force)) ** 0.5
        return {"distance": distance}
    raise ValueError('Exactly one argument must be 0')
if __name__ == "__main__":
import doctest
doctest.testmod()
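# Worked example (values are illustrative): two 1 µC charges 1 m apart give
# F = k * |q1 * q2| / r**2 = 8.988e9 * 1e-6 * 1e-6 / 1**2 = 8.988e-3 N.
#
#   print(coulombs_law(0, 1e-6, 1e-6, 1))  # {'force': 0.008988}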
| 39 |
'''simple docstring'''
from typing import Any
class Node:
    def __init__(self, data: Any):
        self.data = data
        self.next = None

    def __repr__(self):
        return f"""Node({self.data})"""


class LinkedList:
    def __init__(self):
        self.head = None

    def __iter__(self):
        node = self.head
        while node:
            yield node.data
            node = node.next

    def __len__(self):
        return sum(1 for _ in self)

    def __repr__(self):
        return "->".join([str(item) for item in self])

    def __getitem__(self, index: int):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        for i, node in enumerate(self):
            if i == index:
                return node
        return None

    def __setitem__(self, index: int, data: Any):
        if not 0 <= index < len(self):
            raise ValueError('list index out of range.')
        current = self.head
        for _ in range(index):
            current = current.next
        current.data = data

    def insert_tail(self, data: Any):
        self.insert_nth(len(self), data)

    def insert_head(self, data: Any):
        self.insert_nth(0, data)

    def insert_nth(self, index: int, data: Any):
        if not 0 <= index <= len(self):
            raise IndexError('list index out of range')
        new_node = Node(data)
        if self.head is None:
            self.head = new_node
        elif index == 0:
            new_node.next = self.head  # link new_node to head
            self.head = new_node
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node

    def print_list(self):  # print every node data
        print(self)

    def delete_head(self):
        return self.delete_nth(0)

    def delete_tail(self):  # delete from tail
        return self.delete_nth(len(self) - 1)

    def delete_nth(self, index: int = 0):
        if not 0 <= index <= len(self) - 1:  # test if index is valid
            raise IndexError('List index out of range.')
        delete_node = self.head  # default first node
        if index == 0:
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
        return delete_node.data

    def is_empty(self):
        return self.head is None

    def reverse(self):
        prev = None
        current = self.head
        while current:
            # Store the current node's next node.
            next_node = current.next
            # Make the current node's next point backwards
            current.next = prev
            # Make the previous node be the current node
            prev = current
            # Make the current node the next node (to progress iteration)
            current = next_node
        # Return prev in order to put the head at the end
        self.head = prev


def test_singly_linked_list() -> None:
    linked_list = LinkedList()
    assert linked_list.is_empty() is True
    assert str(linked_list) == ""

    try:
        linked_list.delete_head()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    try:
        linked_list.delete_tail()
        raise AssertionError  # This should not happen.
    except IndexError:
        assert True  # This should happen.

    for i in range(10):
        assert len(linked_list) == i
        linked_list.insert_nth(i, i + 1)

    assert str(linked_list) == "->".join(str(i) for i in range(1, 11))

    linked_list.insert_head(0)
    linked_list.insert_tail(11)
    assert str(linked_list) == "->".join(str(i) for i in range(0, 12))

    assert linked_list.delete_head() == 0
    assert linked_list.delete_nth(9) == 10
    assert linked_list.delete_tail() == 11
    assert len(linked_list) == 9
    assert str(linked_list) == "->".join(str(i) for i in range(1, 10))

    assert all(linked_list[i] == i + 1 for i in range(0, 9)) is True

    for i in range(0, 9):
        linked_list[i] = -i
    assert all(linked_list[i] == -i for i in range(0, 9)) is True

    linked_list.reverse()
    assert str(linked_list) == "->".join(str(i) for i in range(-8, 1))


def test_singly_linked_list_2() -> None:
    test_input = [
        -9,
        100,
        Node(77345112),
        'dlrow olleH',
        7,
        5555,
        0,
        -192.55555,
        'Hello, world!',
        77.9,
        Node(10),
        None,
        None,
        12.20,
    ]
    linked_list = LinkedList()

    for i in test_input:
        linked_list.insert_tail(i)

    # Check if it's empty or not
    assert linked_list.is_empty() is False
    assert (
        str(linked_list) == "-9->100->Node(77345112)->dlrow olleH->7->5555->0->"
        "-192.55555->Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the head
    result = linked_list.delete_head()
    assert result == -9
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None->12.2"
    )

    # Delete the tail
    result = linked_list.delete_tail()
    assert result == 12.2
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None->None"
    )

    # Delete a node in specific location in linked list
    result = linked_list.delete_nth(10)
    assert result is None
    assert (
        str(linked_list) == "100->Node(77345112)->dlrow olleH->7->5555->0->-192.55555->"
        "Hello, world!->77.9->Node(10)->None"
    )

    # Add a Node instance to its head
    linked_list.insert_head(Node('Hello again, world!'))
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None"
    )

    # Add None to its tail
    linked_list.insert_tail(None)
    assert (
        str(linked_list)
        == "Node(Hello again, world!)->100->Node(77345112)->dlrow olleH->"
        "7->5555->0->-192.55555->Hello, world!->77.9->Node(10)->None->None"
    )

    # Reverse the linked list
    linked_list.reverse()
    assert (
        str(linked_list)
        == "None->None->Node(10)->77.9->Hello, world!->-192.55555->0->5555->"
        "7->dlrow olleH->Node(77345112)->100->Node(Hello again, world!)"
    )


def main():
    from doctest import testmod

    testmod()

    linked_list = LinkedList()
    linked_list.insert_head(input('Inserting 1st at head ').strip())
    linked_list.insert_head(input('Inserting 2nd at head ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    linked_list.insert_tail(input('\nInserting 1st at tail ').strip())
    linked_list.insert_tail(input('Inserting 2nd at tail ').strip())
    print('\nPrint list:')
    linked_list.print_list()
    print('\nDelete head')
    linked_list.delete_head()
    print('Delete tail')
    linked_list.delete_tail()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nReverse linked list')
    linked_list.reverse()
    print('\nPrint list:')
    linked_list.print_list()
    print('\nString representation of linked list:')
    print(linked_list)
    print('\nReading/changing Node data using indexing:')
    print(f"""Element at Position 1: {linked_list[1]}""")
    linked_list[1] = input('Enter New Value: ').strip()
    print('New list:')
    print(linked_list)
    print(f"""length of linked_list is : {len(linked_list)}""")


if __name__ == "__main__":
    main()
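# A small sketch, not part of the original tests, showing the list doubling as
# a LIFO stack through its head operations:
#
#   stack = LinkedList()
#   stack.insert_head("a")
#   stack.insert_head("b")
#   assert stack.delete_head() == "b"  # last in, first out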
| 89 | 0 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
A : int = 1_6
A : Dict = 3_2
def get_dataloaders(accelerator, batch_size=1_6, model_name_or_path="bert-base-cased"):
"""simple docstring"""
A__ = AutoTokenizer.from_pretrained(lowerCAmelCase_ )
A__ = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(__a :List[Any] ):
# max_length=None => use the model max length (it's actually the default)
A__ = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
A__ = datasets.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , load_from_cache_file=lowerCAmelCase_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
A__ = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(__a :Optional[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(lowerCAmelCase_ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )
# Instantiate dataloaders.
A__ = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
A__ = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
return train_dataloader, eval_dataloader
def evaluation_loop(accelerator, model, eval_dataloader, metric):
"""simple docstring"""
model.eval()
A__ = 0
for step, batch in enumerate(lowerCAmelCase_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
A__ = model(**lowerCAmelCase_ )
A__ = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
A__ = accelerator.gather(
(predictions, batch["""labels"""]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(lowerCAmelCase_ ) - 1:
A__ = predictions[: len(eval_dataloader.dataset ) - samples_seen]
A__ = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=lowerCAmelCase_ , references=lowerCAmelCase_ , )
A__ = metric.compute()
return eval_metric["accuracy"]
def training_function(config, args):
"""simple docstring"""
A__ = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
A__ = config['lr']
A__ = int(config["""num_epochs"""] )
A__ = int(config["""seed"""] )
A__ = int(config["""batch_size"""] )
A__ = args.model_name_or_path
set_seed(lowerCAmelCase_ )
A__ = get_dataloaders(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
A__ = AutoModelForSequenceClassification.from_pretrained(lowerCAmelCase_ , return_dict=lowerCAmelCase_ )
# Instantiate optimizer
A__ = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
A__ = optimizer_cls(params=model.parameters() , lr=lowerCAmelCase_ )
if accelerator.state.deepspeed_plugin is not None:
A__ = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
A__ = 1
A__ = (len(lowerCAmelCase_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
A__ = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase_ , num_warmup_steps=0 , num_training_steps=lowerCAmelCase_ , )
else:
A__ = DummyScheduler(lowerCAmelCase_ , total_num_steps=lowerCAmelCase_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
A__ = accelerator.prepare(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# We need to keep track of how many total steps we have iterated over
A__ = 0
# We also need to keep track of the stating epoch so files are named properly
A__ = 0
A__ = evaluate.load("""glue""" , """mrpc""" )
A__ = num_epochs
if args.partial_train_epoch is not None:
A__ = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
A__ = args.resume_from_checkpoint.split("""epoch_""" )[1]
A__ = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
A__ = int(lowerCAmelCase_ ) + 1
A__ = evaluation_loop(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.print("""resumed checkpoint performance:""" , lowerCAmelCase_ )
accelerator.print("""resumed checkpoint\'s scheduler\'s lr:""" , lr_scheduler.get_lr()[0] )
accelerator.print("""resumed optimizers\'s lr:""" , optimizer.param_groups[0]["""lr"""] )
with open(os.path.join(args.output_dir , F'state_{starting_epoch-1}.json' ) , """r""" ) as f:
A__ = json.load(lowerCAmelCase_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
A__ = {}
for epoch in range(lowerCAmelCase_ , lowerCAmelCase_ ):
model.train()
for step, batch in enumerate(lowerCAmelCase_ ):
A__ = model(**lowerCAmelCase_ )
A__ = outputs.loss
A__ = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
A__ = F'epoch_{epoch}'
A__ = os.path.join(args.output_dir , lowerCAmelCase_ )
accelerator.save_state(lowerCAmelCase_ )
A__ = evaluation_loop(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
A__ = accuracy
A__ = lr_scheduler.get_lr()[0]
A__ = optimizer.param_groups[0]['lr']
A__ = epoch
A__ = overall_step
accelerator.print(F'epoch {epoch}:' , lowerCAmelCase_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'state_{epoch}.json' ) , """w""" ) as f:
json.dump(lowerCAmelCase_ , lowerCAmelCase_ )
def main() -> None:
"""simple docstring"""
    parser = argparse.ArgumentParser(description="""Simple example of a training script with checkpoint saving and resuming.""" )
parser.add_argument(
"""--model_name_or_path""" , type=lowerCAmelCase_ , default="""bert-base-cased""" , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=lowerCAmelCase_ , )
parser.add_argument(
"""--output_dir""" , type=lowerCAmelCase_ , default=""".""" , help="""Optional save directory where all checkpoint folders will be stored. Default is the current working directory.""" , )
parser.add_argument(
"""--resume_from_checkpoint""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="""If the training should continue from a checkpoint folder.""" , )
parser.add_argument(
"""--partial_train_epoch""" , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help="""If passed, the training will stop after this number of epochs.""" , )
parser.add_argument(
"""--num_epochs""" , type=lowerCAmelCase_ , default=2 , help="""Number of train epochs.""" , )
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': args.num_epochs, 'seed': 4_2, 'batch_size': 1_6}
    training_function(config, args)
if __name__ == "__main__":
main()
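# A hypothetical launch of the script above; the DeepSpeed config file and
# paths are placeholders:
#
#   accelerate launch --config_file ds_zero2_config.yaml checkpointing_script.py \
#       --model_name_or_path bert-base-cased --output_dir ./ckpts --num_epochs 2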
| 274 |
'''simple docstring'''
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
__lowerCAmelCase = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class __magic_name__ ( unittest.TestCase ):
def __lowercase ( self : str ,_UpperCAmelCase : Path ,_UpperCAmelCase : Union[str, None] = None ,_UpperCAmelCase : Union[List[str], None] = None ,_UpperCAmelCase : Union[str, List[str], None] = None ,_UpperCAmelCase : bool = True ,):
_a : Dict = [file for file in os.listdir(_UpperCAmelCase ) if os.path.isfile(os.path.join(_UpperCAmelCase ,_UpperCAmelCase ) )]
if identifier is not None:
_a : str = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
for n_ in n_identifier:
_a : int = [file for file in files if n_ not in file]
else:
_a : Optional[Any] = [file for file in files if n_identifier not in file]
_a : Dict = ignore_files or []
ignore_files.append('__init__.py' )
_a : List[str] = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' ,_UpperCAmelCase )
if only_modules:
_a : Any = file.split('.' )[0]
try:
_a : Optional[int] = getattr(_UpperCAmelCase ,_UpperCAmelCase )
_a : Dict = doctest.DocTestSuite(_UpperCAmelCase )
_a : Optional[int] = unittest.TextTestRunner().run(_UpperCAmelCase )
self.assertIs(len(result.failures ) ,0 )
except AttributeError:
logger.info(F"""{module_identifier} is not a module.""" )
else:
_a : str = doctest.testfile(str('..' / directory / file ) ,optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed ,0 )
def __lowercase ( self : Union[str, Any] ):
_a : Optional[Any] = Path('src/transformers' )
_a : Optional[Any] = 'modeling'
_a : Union[str, Any] = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase ,ignore_files=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : str = Path('src/transformers' )
_a : List[str] = 'tokenization'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Any = Path('src/transformers' )
_a : str = 'configuration'
self.analyze_directory(_UpperCAmelCase ,identifier=_UpperCAmelCase )
def __lowercase ( self : Dict ):
_a : Tuple = Path('src/transformers' )
_a : Optional[int] = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(_UpperCAmelCase ,n_identifier=_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ):
_a : Union[str, Any] = Path('docs/source' )
_a : List[str] = ['favicon.ico']
self.analyze_directory(_UpperCAmelCase ,ignore_files=_UpperCAmelCase ,only_modules=_UpperCAmelCase )
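# A minimal standalone sketch of the mechanism these tests rely on: doctest
# can wrap a module's `>>>` examples into a unittest suite (the target module
# is illustrative and may raise ValueError if it contains no doctests).
#
#   import doctest
#   import unittest
#   import statistics
#
#   suite = doctest.DocTestSuite(statistics)
#   result = unittest.TextTestRunner().run(suite)
#   assert len(result.failures) == 0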
| 89 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_funnel''': ['''FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FunnelConfig'''],
'''convert_funnel_original_tf_checkpoint_to_pytorch''': [],
'''tokenization_funnel''': ['''FunnelTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_funnel_fast'''] = ['''FunnelTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_funnel'''] = [
'''FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FunnelBaseModel''',
'''FunnelForMaskedLM''',
'''FunnelForMultipleChoice''',
'''FunnelForPreTraining''',
'''FunnelForQuestionAnswering''',
'''FunnelForSequenceClassification''',
'''FunnelForTokenClassification''',
'''FunnelModel''',
'''FunnelPreTrainedModel''',
'''load_tf_weights_in_funnel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_funnel'''] = [
'''TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFFunnelBaseModel''',
'''TFFunnelForMaskedLM''',
'''TFFunnelForMultipleChoice''',
'''TFFunnelForPreTraining''',
'''TFFunnelForQuestionAnswering''',
'''TFFunnelForSequenceClassification''',
'''TFFunnelForTokenClassification''',
'''TFFunnelModel''',
'''TFFunnelPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
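# With the lazy structure above, importing the package is cheap; the heavy
# framework modules load only on first attribute access. A rough sketch:
#
#   import importlib
#   funnel = importlib.import_module("transformers.models.funnel")
#   model_cls = funnel.FunnelModel  # modeling_funnel is imported only here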
| 18 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all image processors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...image_processing_utils import ImageProcessingMixin
from ...utils import CONFIG_NAME, IMAGE_PROCESSOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = OrderedDict(
[
('''align''', '''EfficientNetImageProcessor'''),
('''beit''', '''BeitImageProcessor'''),
('''bit''', '''BitImageProcessor'''),
('''blip''', '''BlipImageProcessor'''),
('''blip-2''', '''BlipImageProcessor'''),
('''bridgetower''', '''BridgeTowerImageProcessor'''),
('''chinese_clip''', '''ChineseCLIPImageProcessor'''),
('''clip''', '''CLIPImageProcessor'''),
('''clipseg''', '''ViTImageProcessor'''),
('''conditional_detr''', '''ConditionalDetrImageProcessor'''),
('''convnext''', '''ConvNextImageProcessor'''),
('''convnextv2''', '''ConvNextImageProcessor'''),
('''cvt''', '''ConvNextImageProcessor'''),
('''data2vec-vision''', '''BeitImageProcessor'''),
('''deformable_detr''', '''DeformableDetrImageProcessor'''),
('''deit''', '''DeiTImageProcessor'''),
('''deta''', '''DetaImageProcessor'''),
('''detr''', '''DetrImageProcessor'''),
('''dinat''', '''ViTImageProcessor'''),
('''donut-swin''', '''DonutImageProcessor'''),
('''dpt''', '''DPTImageProcessor'''),
('''efficientformer''', '''EfficientFormerImageProcessor'''),
('''efficientnet''', '''EfficientNetImageProcessor'''),
('''flava''', '''FlavaImageProcessor'''),
('''focalnet''', '''BitImageProcessor'''),
('''git''', '''CLIPImageProcessor'''),
('''glpn''', '''GLPNImageProcessor'''),
('''groupvit''', '''CLIPImageProcessor'''),
('''imagegpt''', '''ImageGPTImageProcessor'''),
('''instructblip''', '''BlipImageProcessor'''),
('''layoutlmv2''', '''LayoutLMv2ImageProcessor'''),
('''layoutlmv3''', '''LayoutLMv3ImageProcessor'''),
('''levit''', '''LevitImageProcessor'''),
('''mask2former''', '''Mask2FormerImageProcessor'''),
('''maskformer''', '''MaskFormerImageProcessor'''),
('''mgp-str''', '''ViTImageProcessor'''),
('''mobilenet_v1''', '''MobileNetV1ImageProcessor'''),
('''mobilenet_v2''', '''MobileNetV2ImageProcessor'''),
        ('''mobilevit''', '''MobileViTImageProcessor'''),
('''mobilevitv2''', '''MobileViTImageProcessor'''),
('''nat''', '''ViTImageProcessor'''),
('''oneformer''', '''OneFormerImageProcessor'''),
('''owlvit''', '''OwlViTImageProcessor'''),
('''perceiver''', '''PerceiverImageProcessor'''),
('''pix2struct''', '''Pix2StructImageProcessor'''),
('''poolformer''', '''PoolFormerImageProcessor'''),
('''regnet''', '''ConvNextImageProcessor'''),
('''resnet''', '''ConvNextImageProcessor'''),
('''sam''', '''SamImageProcessor'''),
('''segformer''', '''SegformerImageProcessor'''),
('''swiftformer''', '''ViTImageProcessor'''),
('''swin''', '''ViTImageProcessor'''),
('''swin2sr''', '''Swin2SRImageProcessor'''),
('''swinv2''', '''ViTImageProcessor'''),
('''table-transformer''', '''DetrImageProcessor'''),
('''timesformer''', '''VideoMAEImageProcessor'''),
('''tvlt''', '''TvltImageProcessor'''),
('''upernet''', '''SegformerImageProcessor'''),
('''van''', '''ConvNextImageProcessor'''),
('''videomae''', '''VideoMAEImageProcessor'''),
('''vilt''', '''ViltImageProcessor'''),
('''vit''', '''ViTImageProcessor'''),
('''vit_hybrid''', '''ViTHybridImageProcessor'''),
('''vit_mae''', '''ViTImageProcessor'''),
('''vit_msn''', '''ViTImageProcessor'''),
('''xclip''', '''CLIPImageProcessor'''),
('''yolos''', '''YolosImageProcessor'''),
]
)
__lowerCAmelCase = _LazyAutoMapping(CONFIG_MAPPING_NAMES, IMAGE_PROCESSOR_MAPPING_NAMES)
def image_processor_class_from_name(class_name) -> Optional[Any]:
for module_name, extractors in IMAGE_PROCESSOR_MAPPING_NAMES.items():
if class_name in extractors:
_a : List[Any] = model_type_to_module_name(lowerCAmelCase_ )
_a : Optional[Any] = importlib.import_module(f""".{module_name}""" , 'transformers.models' )
try:
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
except AttributeError:
continue
for _, extractor in IMAGE_PROCESSOR_MAPPING._extra_content.items():
if getattr(lowerCAmelCase_ , '__name__' , lowerCAmelCase_ ) == class_name:
return extractor
    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
# init and we return the proper dummy to get an appropriate error message.
_a : Dict = importlib.import_module('transformers' )
if hasattr(lowerCAmelCase_ , lowerCAmelCase_ ):
return getattr(lowerCAmelCase_ , lowerCAmelCase_ )
return None
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ = None , lowerCAmelCase_ = False , lowerCAmelCase_ = False , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = None , lowerCAmelCase_ = False , **lowerCAmelCase_ , ) -> Tuple:
_a : List[str] = get_file_from_repo(
lowerCAmelCase_ , lowerCAmelCase_ , cache_dir=lowerCAmelCase_ , force_download=lowerCAmelCase_ , resume_download=lowerCAmelCase_ , proxies=lowerCAmelCase_ , use_auth_token=lowerCAmelCase_ , revision=lowerCAmelCase_ , local_files_only=lowerCAmelCase_ , )
if resolved_config_file is None:
logger.info(
'Could not locate the image processor configuration file, will try to use the model config instead.' )
return {}
with open(lowerCAmelCase_ , encoding='utf-8' ) as reader:
return json.load(lowerCAmelCase_ )
class __magic_name__ :
def __init__( self : List[str] ):
raise EnvironmentError(
'AutoImageProcessor is designed to be instantiated '
'using the `AutoImageProcessor.from_pretrained(pretrained_model_name_or_path)` method.' )
@classmethod
@replace_list_option_in_docstrings(_UpperCAmelCase )
def __lowercase ( cls : Dict ,_UpperCAmelCase : Union[str, Any] ,**_UpperCAmelCase : Optional[Any] ):
_a : Any = kwargs.pop('config' ,_UpperCAmelCase )
_a : Dict = kwargs.pop('trust_remote_code' ,_UpperCAmelCase )
_a : Any = True
_a , _a : Tuple = ImageProcessingMixin.get_image_processor_dict(_UpperCAmelCase ,**_UpperCAmelCase )
_a : List[Any] = config_dict.get('image_processor_type' ,_UpperCAmelCase )
_a : int = None
if "AutoImageProcessor" in config_dict.get('auto_map' ,{} ):
_a : Any = config_dict['auto_map']['AutoImageProcessor']
# If we still don't have the image processor class, check if we're loading from a previous feature extractor config
# and if so, infer the image processor class from there.
if image_processor_class is None and image_processor_auto_map is None:
_a : List[Any] = config_dict.pop('feature_extractor_type' ,_UpperCAmelCase )
if feature_extractor_class is not None:
logger.warning(
'Could not find image processor class in the image processor config or the model config. Loading'
' based on pattern matching with the model\'s feature extractor configuration.' )
_a : Optional[int] = feature_extractor_class.replace('FeatureExtractor' ,'ImageProcessor' )
if "AutoFeatureExtractor" in config_dict.get('auto_map' ,{} ):
_a : List[Any] = config_dict['auto_map']['AutoFeatureExtractor']
_a : List[str] = feature_extractor_auto_map.replace('FeatureExtractor' ,'ImageProcessor' )
logger.warning(
'Could not find image processor auto map in the image processor config or the model config.'
' Loading based on pattern matching with the model\'s feature extractor configuration.' )
# If we don't find the image processor class in the image processor config, let's try the model config.
if image_processor_class is None and image_processor_auto_map is None:
if not isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Dict = AutoConfig.from_pretrained(_UpperCAmelCase ,**_UpperCAmelCase )
# It could be in `config.image_processor_type``
_a : Optional[int] = getattr(_UpperCAmelCase ,'image_processor_type' ,_UpperCAmelCase )
if hasattr(_UpperCAmelCase ,'auto_map' ) and "AutoImageProcessor" in config.auto_map:
_a : Union[str, Any] = config.auto_map['AutoImageProcessor']
if image_processor_class is not None:
_a : Optional[int] = image_processor_class_from_name(_UpperCAmelCase )
_a : List[str] = image_processor_auto_map is not None
_a : Optional[int] = image_processor_class is not None or type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING
_a : Optional[int] = resolve_trust_remote_code(
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
if has_remote_code and trust_remote_code:
_a : Dict = get_class_from_dynamic_module(
_UpperCAmelCase ,_UpperCAmelCase ,**_UpperCAmelCase )
_a : int = kwargs.pop('code_revision' ,_UpperCAmelCase )
if os.path.isdir(_UpperCAmelCase ):
image_processor_class.register_for_auto_class()
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
elif image_processor_class is not None:
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
# Last try: we use the IMAGE_PROCESSOR_MAPPING.
elif type(_UpperCAmelCase ) in IMAGE_PROCESSOR_MAPPING:
_a : Dict = IMAGE_PROCESSOR_MAPPING[type(_UpperCAmelCase )]
return image_processor_class.from_dict(_UpperCAmelCase ,**_UpperCAmelCase )
raise ValueError(
F"""Unrecognized image processor in {pretrained_model_name_or_path}. Should have a """
F"""`image_processor_type` key in its {IMAGE_PROCESSOR_NAME} of {CONFIG_NAME}, or one of the following """
F"""`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in IMAGE_PROCESSOR_MAPPING_NAMES.keys() )}""" )
@staticmethod
def __lowercase ( _UpperCAmelCase : Optional[Any] ,_UpperCAmelCase : Dict ):
IMAGE_PROCESSOR_MAPPING.register(_UpperCAmelCase ,_UpperCAmelCase )
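# A minimal usage sketch of the class above; the checkpoint name and the custom
# (config, processor) pair are illustrative assumptions, not part of this file.
#
#   processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#   AutoImageProcessor.register(CustomConfig, CustomImageProcessor)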
| 89 | 0 |
demo_graph = {
    "A": ["B", "C", "E"],
    "B": ["A", "D", "E"],
    "C": ["A", "F", "G"],
    "D": ["B"],
    "E": ["A", "B", "D"],
    "F": ["C"],
    "G": ["C"],
}
def bfs_shortest_path(graph: dict, start, goal) -> list[str]:
    """Find a shortest path between `start` and `goal` via breadth-first search."""
    # keep track of explored nodes
    explored = set()
    # keep track of all the paths to be checked
    queue = [[start]]
    # return path if start is goal
    if start == goal:
        return [start]
    # keeps looping until all possible paths have been checked
    while queue:
        # pop the first path from the queue
        path = queue.pop(0)
        # get the last node from the path
        node = path[-1]
        if node not in explored:
            neighbours = graph[node]
            # go through all neighbour nodes, construct a new path and
            # push it into the queue
            for neighbour in neighbours:
                new_path = list(path)
                new_path.append(neighbour)
                queue.append(new_path)
                # return path if neighbour is goal
                if neighbour == goal:
                    return new_path
            # mark node as explored
            explored.add(node)
    # in case there's no path between the 2 nodes
    return []
def bfs_shortest_path_distance(graph: dict, start, target) -> int:
    """Return the number of edges on a shortest path from `start` to `target`,
    or -1 if no such path exists."""
    if not graph or start not in graph or target not in graph:
        return -1
    if start == target:
        return 0
    queue = [start]
    visited = {start}
    # Keep tab on distances from `start` node.
    dist = {start: 0, target: -1}
    while queue:
        node = queue.pop(0)
        if node == target:
            dist[target] = (
                dist[node] if dist[target] == -1 else min(dist[target], dist[node])
            )
        for adjacent in graph[node]:
            if adjacent not in visited:
                visited.add(adjacent)
                queue.append(adjacent)
                dist[adjacent] = dist[node] + 1
    return dist[target]
if __name__ == "__main__":
print(bfs_shortest_path(demo_graph, '''G''', '''D''')) # returns ['G', 'C', 'A', 'B', 'D']
print(bfs_shortest_path_distance(demo_graph, '''G''', '''D''')) # returns 4
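# A deque-backed variant of bfs_shortest_path: list.pop(0) above is O(n) per pop,
# while deque.popleft() is O(1). Same traversal, same results.
from collections import deque


def bfs_shortest_path_deque(graph: dict, start, goal) -> list[str]:
    """Deque-based sketch of bfs_shortest_path with identical output."""
    if start == goal:
        return [start]
    explored = set()
    queue = deque([[start]])
    while queue:
        path = queue.popleft()
        node = path[-1]
        if node not in explored:
            for neighbour in graph[node]:
                new_path = [*path, neighbour]
                if neighbour == goal:
                    return new_path
                queue.append(new_path)
            explored.add(node)
    return []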
| 338 |
'''simple docstring'''
import os
import sys
import warnings
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen
from ..table import array_cast
from ..utils.file_utils import is_local_path
from ..utils.py_utils import first_non_null_value, no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
import PIL.Image
from .features import FeatureType
__lowerCAmelCase = None
__lowerCAmelCase = '''<''' if sys.byteorder == '''little''' else '''>'''
# Origin: https://github.com/python-pillow/Pillow/blob/698951e19e19972aeed56df686868f1329981c12/src/PIL/Image.py#L3126 minus "|i1" which values are not preserved correctly when saving and loading an image
__lowerCAmelCase = [
np.dtype('''|b1'''),
np.dtype('''|u1'''),
np.dtype('''<u2'''),
np.dtype('''>u2'''),
np.dtype('''<i2'''),
np.dtype('''>i2'''),
np.dtype('''<u4'''),
np.dtype('''>u4'''),
np.dtype('''<i4'''),
np.dtype('''>i4'''),
np.dtype('''<f4'''),
np.dtype('''>f4'''),
np.dtype('''<f8'''),
np.dtype('''>f8'''),
]
@dataclass
class __magic_name__ :
lowerCAmelCase : bool = True
lowerCAmelCase : Optional[str] = None
# Automatically constructed
lowerCAmelCase : ClassVar[str] = "PIL.Image.Image"
lowerCAmelCase : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()} )
lowerCAmelCase : str = field(default='Image' , init=_UpperCamelCase , repr=_UpperCamelCase )
def __call__( self : Union[str, Any] ):
return self.pa_type
def __lowercase ( self : Any ,_UpperCAmelCase : Union[str, bytes, dict, np.ndarray, "PIL.Image.Image"] ):
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : Optional[Any] = np.array(_UpperCAmelCase )
if isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": value, "bytes": None}
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
return {"path": None, "bytes": value}
elif isinstance(_UpperCAmelCase ,np.ndarray ):
# convert the image array to PNG/TIFF bytes
return encode_np_array(_UpperCAmelCase )
elif isinstance(_UpperCAmelCase ,PIL.Image.Image ):
# convert the PIL image to bytes (default format is PNG/TIFF)
return encode_pil_image(_UpperCAmelCase )
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the image bytes, and path is used to infer the image format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
F"""An image sample should have one of 'path' or 'bytes' but they are missing or None in {value}.""" )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : dict ,_UpperCAmelCase : Optional[int]=None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Image(decode=True) instead.' )
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support decoding images, please install \'Pillow\'.' )
if token_per_repo_id is None:
_a : Dict = {}
_a , _a : str = value['path'], value['bytes']
if bytes_ is None:
if path is None:
raise ValueError(F"""An image should have one of 'path' or 'bytes' but both are None in {value}.""" )
else:
if is_local_path(_UpperCAmelCase ):
_a : Any = PIL.Image.open(_UpperCAmelCase )
else:
_a : List[Any] = path.split('::' )[-1]
try:
_a : str = string_to_dict(_UpperCAmelCase ,config.HUB_DATASETS_URL )['repo_id']
_a : Optional[Any] = token_per_repo_id.get(_UpperCAmelCase )
except ValueError:
_a : int = None
with xopen(_UpperCAmelCase ,'rb' ,use_auth_token=_UpperCAmelCase ) as f:
_a : Tuple = BytesIO(f.read() )
_a : Union[str, Any] = PIL.Image.open(bytes_ )
else:
_a : Optional[int] = PIL.Image.open(BytesIO(bytes_ ) )
image.load() # to avoid "Too many open files" errors
return image
def __lowercase ( self : int ):
from .features import Value
return (
self
if self.decode
else {
"bytes": Value('binary' ),
"path": Value('string' ),
}
)
def __lowercase ( self : str ,_UpperCAmelCase : Union[pa.StringArray, pa.StructArray, pa.ListArray] ):
if pa.types.is_string(storage.type ):
_a : Union[str, Any] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
_a : Union[str, Any] = pa.StructArray.from_arrays([bytes_array, storage] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
_a : List[str] = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Any = pa.StructArray.from_arrays([storage, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
_a : Union[str, Any] = storage.field('bytes' )
else:
_a : Tuple = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
_a : Union[str, Any] = storage.field('path' )
else:
_a : Dict = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=storage.is_null() )
elif pa.types.is_list(storage.type ):
_a : List[str] = pa.array(
[encode_np_array(np.array(_UpperCAmelCase ) )['bytes'] if arr is not None else None for arr in storage.to_pylist()] ,type=pa.binary() ,)
_a : int = pa.array([None] * len(_UpperCAmelCase ) ,type=pa.string() )
_a : Optional[Any] = pa.StructArray.from_arrays(
[bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowercase ( self : Dict ,_UpperCAmelCase : pa.StructArray ):
@no_op_if_value_is_null
def path_to_bytes(_UpperCAmelCase : Tuple ):
with xopen(_UpperCAmelCase ,'rb' ) as f:
_a : int = f.read()
return bytes_
_a : Any = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] ,type=pa.binary() ,)
_a : Optional[Any] = pa.array(
[os.path.basename(_UpperCAmelCase ) if path is not None else None for path in storage.field('path' ).to_pylist()] ,type=pa.string() ,)
_a : Dict = pa.StructArray.from_arrays([bytes_array, path_array] ,['bytes', 'path'] ,mask=bytes_array.is_null() )
return array_cast(_UpperCAmelCase ,self.pa_type )
def __lowerCamelCase ( ) -> List[str]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
global _IMAGE_COMPRESSION_FORMATS
if _IMAGE_COMPRESSION_FORMATS is None:
PIL.Image.init()
_a : Dict = list(set(PIL.Image.OPEN.keys() ) & set(PIL.Image.SAVE.keys() ) )
return _IMAGE_COMPRESSION_FORMATS
def __lowerCamelCase ( lowerCAmelCase_ ) -> bytes:
_a : Optional[int] = BytesIO()
if image.format in list_image_compression_formats():
_a : Optional[Any] = image.format
else:
_a : str = 'PNG' if image.mode in ['1', 'L', 'LA', 'RGB', 'RGBA'] else 'TIFF'
image.save(lowerCAmelCase_ , format=lowerCAmelCase_ )
return buffer.getvalue()
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if hasattr(lowerCAmelCase_ , 'filename' ) and image.filename != "":
return {"path": image.filename, "bytes": None}
else:
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> dict:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
_a : List[Any] = array.dtype
_a : Optional[int] = dtype.byteorder if dtype.byteorder != '=' else _NATIVE_BYTEORDER
_a : Union[str, Any] = dtype.kind
_a : Union[str, Any] = dtype.itemsize
_a : List[Any] = None
# Multi-channel array case (only np.dtype("|u1") is allowed)
if array.shape[2:]:
_a : Optional[int] = np.dtype('|u1' )
if dtype_kind not in ["u", "i"]:
raise TypeError(
f"""Unsupported array dtype {dtype} for image encoding. Only {dest_dtype} is supported for multi-channel arrays.""" )
if dtype is not dest_dtype:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
# Exact match
elif dtype in _VALID_IMAGE_ARRAY_DTPYES:
_a : Union[str, Any] = dtype
else: # Downcast the type within the kind (np.can_cast(from_type, to_type, casting="same_kind") doesn't behave as expected, so do it manually)
while dtype_itemsize >= 1:
_a : str = dtype_byteorder + dtype_kind + str(lowerCAmelCase_ )
_a : List[Any] = np.dtype(lowerCAmelCase_ )
if dest_dtype in _VALID_IMAGE_ARRAY_DTPYES:
warnings.warn(f"""Downcasting array dtype {dtype} to {dest_dtype} to be compatible with 'Pillow'""" )
break
else:
dtype_itemsize //= 2
if dest_dtype is None:
raise TypeError(
f"""Cannot convert dtype {dtype} to a valid image dtype. Valid image dtypes: {_VALID_IMAGE_ARRAY_DTPYES}""" )
_a : Union[str, Any] = PIL.Image.fromarray(array.astype(lowerCAmelCase_ ) )
return {"path": None, "bytes": image_to_bytes(lowerCAmelCase_ )}
def __lowerCamelCase ( lowerCAmelCase_ ) -> List[dict]:
if config.PIL_AVAILABLE:
import PIL.Image
else:
raise ImportError('To support encoding images, please install \'Pillow\'.' )
if objs:
_a , _a : Optional[Any] = first_non_null_value(lowerCAmelCase_ )
if isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
return [{"path": obj, "bytes": None} if obj is not None else None for obj in objs]
if isinstance(lowerCAmelCase_ , np.ndarray ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
elif isinstance(lowerCAmelCase_ , PIL.Image.Image ):
_a : List[str] = no_op_if_value_is_null(lowerCAmelCase_ )
return [obj_to_image_dict_func(lowerCAmelCase_ ) for obj in objs]
else:
return objs
else:
return objs
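# A minimal usage sketch: the feature above is normally driven through the
# public `datasets` API. The file name is an illustrative assumption.
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"image": ["cat.png"]})
#   ds = ds.cast_column("image", datasets.Image())
#   ds[0]["image"]  # decode_example lazily returns a PIL.Image.Image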
| 89 | 0 |
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__snake_case : Optional[int] = {
'text_branch': 'text_model',
'audio_branch': 'audio_model.audio_encoder',
'attn': 'attention.self',
'self.proj': 'output.dense',
'attention.self_mask': 'attn_mask',
'mlp.fc1': 'intermediate.dense',
'mlp.fc2': 'output.dense',
'norm1': 'layernorm_before',
'norm2': 'layernorm_after',
'bn0': 'batch_norm',
}
__snake_case : List[Any] = AutoFeatureExtractor.from_pretrained('laion/clap-htsat-unfused', truncation='rand_trunc')
def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        "HTSAT-tiny",
        "roberta",
        checkpoint_path,
        precision="fp32",
        device="cuda:0" if torch.cuda.is_available() else "cpu",
        enable_fusion=enable_fusion,
        fusion_type="aff_2d" if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r".*sequential.(\d+).*"
    text_projection_pattern = r".*_projection.(\d+).*"
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer) // 3}.linear.")
        elif re.match(text_projection_pattern, key):
            projection_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projection_layer == 0 else 2
            key = key.replace(f"_projection.{projection_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:
            # split the fused qkv projection into query, key and value slices
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict
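# Sketch of the qkv split above on a dummy tensor (illustrative shapes only);
# torch.chunk produces the same three slices in one call.
#
#   mixed_qkv = torch.randn(3 * 64, 16)
#   query_layer, key_layer, value_layer = torch.chunk(mixed_qkv, 3, dim=0)
#   assert query_layer.shape[0] == mixed_qkv.size(0) // 3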
def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    # assumption: the fusion flag is carried on the audio sub-config
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__snake_case : List[str] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument('--enable_fusion', action='store_true', help='Whether to enable fusion or not')
__snake_case : Any = parser.parse_args()
    convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 269 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def compare_string(string1: str, string2: str) -> str | Literal[False]:
    """Merge two terms that differ in at most one position; return False otherwise."""
    list1 = list(string1)
    list2 = list(string2)
    count = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count += 1
            list1[i] = "_"
    if count > 1:
        return False
    else:
        return "".join(list1)


def check(binary: list[str]) -> list[str]:
    """Repeatedly combine terms that differ in one bit; terms that never combine
    in a round are prime implicants."""
    pi = []
    while True:
        check1 = ["$"] * len(binary)
        temp = []
        for i in range(len(binary)):
            for j in range(i + 1, len(binary)):
                k = compare_string(binary[i], binary[j])
                # a successful merge stars both parents and carries the merged
                # term into the next round
                if k is not False:
                    check1[i] = "*"
                    check1[j] = "*"
                    temp.append(k)
        for i in range(len(binary)):
            if check1[i] == "$":
                pi.append(binary[i])
        if len(temp) == 0:
            return pi
        binary = list(set(temp))


def decimal_to_binary(no_of_variable: int, minterms: Sequence[int]) -> list[str]:
    """Render each minterm as a fixed-width binary string."""
    temp = []
    for minterm in minterms:
        string = ""
        for _ in range(no_of_variable):
            string = str(minterm % 2) + string
            minterm //= 2
        temp.append(string)
    return temp


def is_for_table(string1: str, string2: str, count: int) -> bool:
    """True if the two strings differ in exactly `count` positions."""
    list1 = list(string1)
    list2 = list(string2)
    count_n = 0
    for i in range(len(list1)):
        if list1[i] != list2[i]:
            count_n += 1
    return count_n == count


def selection(chart: list[list[int]], prime_implicants: list[str]) -> list[str]:
    """Pick essential prime implicants from the coverage chart, then greedily
    cover whatever minterms remain."""
    temp = []
    select = [0] * len(chart)
    # a minterm column covered by exactly one row makes that row essential
    for i in range(len(chart[0])):
        count = 0
        rem = -1
        for j in range(len(chart)):
            if chart[j][i] == 1:
                count += 1
                rem = j
        if count == 1:
            select[rem] = 1
    for i in range(len(chart)):
        if select[i] == 1:
            for j in range(len(chart[0])):
                if chart[i][j] == 1:
                    for k in range(len(chart)):
                        chart[k][j] = 0
            temp.append(prime_implicants[i])
    # greedy cover: repeatedly take the row covering the most remaining columns
    while True:
        max_n = 0
        rem = -1
        for i in range(len(chart)):
            count_n = chart[i].count(1)
            if count_n > max_n:
                max_n = count_n
                rem = i
        if max_n == 0:
            return temp
        temp.append(prime_implicants[rem])
        for i in range(len(chart[0])):
            if chart[rem][i] == 1:
                for j in range(len(chart)):
                    chart[j][i] = 0


def prime_implicant_chart(prime_implicants: list[str], binary: list[str]) -> list[list[int]]:
    """Build the coverage chart: chart[i][j] == 1 iff implicant i covers minterm j."""
    chart = [[0 for _ in range(len(binary))] for _ in range(len(prime_implicants))]
    for i in range(len(prime_implicants)):
        count = prime_implicants[i].count("_")
        for j in range(len(binary)):
            if is_for_table(prime_implicants[i], binary[j], count):
                chart[i][j] = 1
    return chart


def main() -> None:
    no_of_variable = int(input("Enter the no. of variables\n"))
    minterms = [
        int(x)
        for x in input(
            "Enter the decimal representation of Minterms 'Spaces Separated'\n"
        ).split()
    ]
    binary = decimal_to_binary(no_of_variable, minterms)
    prime_implicants = check(binary)
    print("Prime Implicants are:")
    print(prime_implicants)
    chart = prime_implicant_chart(prime_implicants, binary)
    essential_prime_implicants = selection(chart, prime_implicants)
    print("Essential Prime Implicants are:")
    print(essential_prime_implicants)
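# A worked example under the functions above: three variables, minterms [0, 1, 2, 5].
#
#   decimal_to_binary(3, [0, 1, 2, 5])  -> ['000', '001', '010', '101']
#   check(...) merges 000/001 -> 00_, 000/010 -> 0_0, 001/101 -> _01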
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 89 | 0 |
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
__lowerCamelCase : List[Any] = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
__lowerCamelCase : int = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
__lowerCamelCase : Tuple = dict(zip(vocab, range(len(vocab))))
__lowerCamelCase : Optional[Any] = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
__lowerCamelCase : Tuple = Path(tmpdirname)
__lowerCamelCase : int = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
__lowerCamelCase : Any = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
__lowerCamelCase : List[str] = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
__lowerCamelCase : List[str] = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
__lowerCamelCase : Any = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1000,
tgt_vocab_size=1000,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
__lowerCamelCase : List[Any] = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
__lowerCamelCase : Tuple = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
__lowerCamelCase : Optional[Any] = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
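# A follow-up sketch (illustrative): reloading the artifact saved above to
# confirm the round trip before uploading.
#
#   reloaded_tok = FSMTTokenizer.from_pretrained(mname_tiny)
#   reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
#   reloaded.generate(**reloaded_tok(["Making tiny model"], return_tensors="pt"))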
| 52 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team and The OpenBMB Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
__lowerCAmelCase = {
'''configuration_cpmant''': ['''CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''CpmAntConfig'''],
'''tokenization_cpmant''': ['''CpmAntTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase = [
'''CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CpmAntForCausalLM''',
'''CpmAntModel''',
'''CpmAntPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_cpmant import CPMANT_PRETRAINED_CONFIG_ARCHIVE_MAP, CpmAntConfig
from .tokenization_cpmant import CpmAntTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_cpmant import (
CPMANT_PRETRAINED_MODEL_ARCHIVE_LIST,
CpmAntForCausalLM,
CpmAntModel,
CpmAntPreTrainedModel,
)
else:
import sys
__lowerCAmelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
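# A stripped-down sketch of the lazy-module idea above (an assumption-laden
# simplification, not the real _LazyModule): submodule imports are deferred
# until an attribute is first accessed.
#
#   import importlib
#
#   class _Lazy:
#       def __init__(self, pkg, structure):
#           self._pkg, self._structure = pkg, structure
#       def __getattr__(self, name):
#           for mod, names in self._structure.items():
#               if name in names:
#                   return getattr(importlib.import_module(f".{mod}", self._pkg), name)
#           raise AttributeError(name)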
| 89 | 0 |
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
__lowerCamelCase : Dict = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : int=None ) -> Optional[Any]:
"""simple docstring"""
if rng is None:
SCREAMING_SNAKE_CASE__ = random.Random()
SCREAMING_SNAKE_CASE__ = 1
for dim in shape:
total_dims *= dim
SCREAMING_SNAKE_CASE__ = []
for _ in range(lowerCAmelCase_ ):
values.append(rng.randint(0 , vocab_size - 1 ) )
SCREAMING_SNAKE_CASE__ = np.array(lowerCAmelCase_ , dtype=jnp.intaa ).reshape(lowerCAmelCase_ )
return output
def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any]=None ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = ids_tensor(lowerCAmelCase_ , vocab_size=2 , rng=lowerCAmelCase_ )
# make sure that at least one token is attended to for each batch
SCREAMING_SNAKE_CASE__ = 1
return attn_mask
@require_flax
class __snake_case :
lowerCAmelCase_ = None
lowerCAmelCase_ = ()
def __a ( self : List[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = inputs['input_ids'].shape[-1] // 2
SCREAMING_SNAKE_CASE__ = inputs['input_ids'][:max_batch_size, :sequence_length]
SCREAMING_SNAKE_CASE__ = jnp.ones_like(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
SCREAMING_SNAKE_CASE__ = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
SCREAMING_SNAKE_CASE__ = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 0
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model_class.__name__[4:] # Skip the "Flax" at the beginning
SCREAMING_SNAKE_CASE__ = getattr(_UpperCAmelCase , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = pt_model_class(_UpperCAmelCase ).eval()
SCREAMING_SNAKE_CASE__ = load_flax_weights_in_pytorch_model(_UpperCAmelCase , flax_model.params )
SCREAMING_SNAKE_CASE__ = flax_model.generate(_UpperCAmelCase ).sequences
SCREAMING_SNAKE_CASE__ = pt_model.generate(torch.tensor(_UpperCAmelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
SCREAMING_SNAKE_CASE__ = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def __a ( self : Any ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : List[str] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Tuple ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 2
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def __a ( self : str ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 0.8
SCREAMING_SNAKE_CASE__ = 10
SCREAMING_SNAKE_CASE__ = 0.3
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : int ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
SCREAMING_SNAKE_CASE__ = max_length
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = 1
SCREAMING_SNAKE_CASE__ = 8
SCREAMING_SNAKE_CASE__ = 9
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = True
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def __a ( self : Optional[int] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = self._get_input_ids_and_config()
# pad attention mask on the left
SCREAMING_SNAKE_CASE__ = attention_mask.at[(0, 0)].set(0 )
SCREAMING_SNAKE_CASE__ = 2
SCREAMING_SNAKE_CASE__ = max_length
for model_class in self.all_generative_model_classes:
SCREAMING_SNAKE_CASE__ = model_class(_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = model.generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ = jit(model.generate )
SCREAMING_SNAKE_CASE__ = jit_generate(_UpperCAmelCase , attention_mask=_UpperCAmelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class __snake_case ( unittest.TestCase ):
def __a ( self : Union[str, Any] ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
SCREAMING_SNAKE_CASE__ = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
SCREAMING_SNAKE_CASE__ = 'Hello world'
SCREAMING_SNAKE_CASE__ = tokenizer(_UpperCAmelCase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(_UpperCAmelCase , """do_samples""" ):
model.generate(_UpperCAmelCase , do_samples=_UpperCAmelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(_UpperCAmelCase , """foo""" ):
SCREAMING_SNAKE_CASE__ = {'foo': 'bar'}
model.generate(_UpperCAmelCase , **_UpperCAmelCase )
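# The jit pattern the tests above assert on, in isolation; `model` and
# `input_ids` stand in for any of the fixtures above.
#
#   from jax import jit
#   jit_generate = jit(model.generate)
#   sequences = jit_generate(input_ids).sequences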
| 219 |
'''simple docstring'''
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ ( _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = LayoutLMTokenizer
lowerCAmelCase : Tuple = LayoutLMTokenizerFast
lowerCAmelCase : List[Any] = True
lowerCAmelCase : int = True
def __lowercase ( self : Dict ):
super().setUp()
_a : int = [
'[UNK]',
'[CLS]',
'[SEP]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
_a : List[str] = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def __lowercase ( self : Dict ,**_UpperCAmelCase : List[str] ):
return LayoutLMTokenizer.from_pretrained(self.tmpdirname ,**_UpperCAmelCase )
def __lowercase ( self : Optional[Any] ,_UpperCAmelCase : Tuple ):
_a : Optional[int] = 'UNwant\u00E9d,running'
_a : List[Any] = 'unwanted, running'
return input_text, output_text
def __lowercase ( self : Optional[int] ):
_a : Optional[Any] = self.tokenizer_class(self.vocab_file )
_a : Optional[Any] = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(_UpperCAmelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_UpperCAmelCase ) ,[7, 4, 5, 10, 8, 9] )
def __lowercase ( self : Optional[int] ):
pass
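# A usage sketch mirroring the full-tokenizer test above; the checkpoint name
# is an assumption.
#
#   tok = LayoutLMTokenizer.from_pretrained("microsoft/layoutlm-base-uncased")
#   tok.tokenize("UNwant\u00E9d,running")  # WordPiece pieces as asserted above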
| 89 | 0 |
"""simple docstring"""
def solution(n: int = 100) -> int:
    """Difference between the square of the sum and the sum of the squares of
    the first n natural numbers, via closed-form identities."""
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares
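# Brute-force cross-check of the closed forms in solution(); O(n), added for
# verification only.
def brute_force_solution(n: int = 100) -> int:
    numbers = range(1, n + 1)
    return sum(numbers) ** 2 - sum(i * i for i in numbers)


# assert solution() == brute_force_solution() == 25164150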
if __name__ == "__main__":
print(f'''{solution() = }''')
| 255 |
'''simple docstring'''
import copy
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCAmelCase = logging.get_logger(__name__)
__lowerCAmelCase = {
'''microsoft/conditional-detr-resnet-50''': (
'''https://huggingface.co/microsoft/conditional-detr-resnet-50/resolve/main/config.json'''
),
}
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : Any = 'conditional_detr'
lowerCAmelCase : List[str] = ['past_key_values']
lowerCAmelCase : Optional[int] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Optional[int] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=None ,_UpperCAmelCase : List[Any]=3 ,_UpperCAmelCase : List[Any]=300 ,_UpperCAmelCase : Dict=6 ,_UpperCAmelCase : List[str]=2048 ,_UpperCAmelCase : Optional[int]=8 ,_UpperCAmelCase : List[Any]=6 ,_UpperCAmelCase : Optional[int]=2048 ,_UpperCAmelCase : Dict=8 ,_UpperCAmelCase : int=0.0 ,_UpperCAmelCase : Optional[Any]=0.0 ,_UpperCAmelCase : Optional[Any]=True ,_UpperCAmelCase : str="relu" ,_UpperCAmelCase : Tuple=256 ,_UpperCAmelCase : Optional[int]=0.1 ,_UpperCAmelCase : str=0.0 ,_UpperCAmelCase : Optional[int]=0.0 ,_UpperCAmelCase : Union[str, Any]=0.02 ,_UpperCAmelCase : List[str]=1.0 ,_UpperCAmelCase : Any=False ,_UpperCAmelCase : int="sine" ,_UpperCAmelCase : List[str]="resnet50" ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : str=False ,_UpperCAmelCase : str=2 ,_UpperCAmelCase : int=5 ,_UpperCAmelCase : Optional[int]=2 ,_UpperCAmelCase : str=1 ,_UpperCAmelCase : Union[str, Any]=1 ,_UpperCAmelCase : List[str]=2 ,_UpperCAmelCase : Union[str, Any]=5 ,_UpperCAmelCase : List[Any]=2 ,_UpperCAmelCase : Optional[int]=0.25 ,**_UpperCAmelCase : Tuple ,):
if backbone_config is not None and use_timm_backbone:
raise ValueError('You can\'t specify both `backbone_config` and `use_timm_backbone`.' )
if not use_timm_backbone:
if backbone_config is None:
logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
_a : Optional[Any] = CONFIG_MAPPING['resnet'](out_features=['stage4'] )
elif isinstance(_UpperCAmelCase ,_UpperCAmelCase ):
_a : str = backbone_config.get('model_type' )
_a : Union[str, Any] = CONFIG_MAPPING[backbone_model_type]
_a : List[Any] = config_class.from_dict(_UpperCAmelCase )
_a : Tuple = use_timm_backbone
_a : Union[str, Any] = backbone_config
_a : List[Any] = num_channels
_a : Union[str, Any] = num_queries
_a : Optional[Any] = d_model
_a : Tuple = encoder_ffn_dim
_a : Dict = encoder_layers
_a : List[str] = encoder_attention_heads
_a : Union[str, Any] = decoder_ffn_dim
_a : Optional[int] = decoder_layers
_a : int = decoder_attention_heads
_a : Optional[int] = dropout
_a : Tuple = attention_dropout
_a : List[Any] = activation_dropout
_a : str = activation_function
_a : Optional[Any] = init_std
_a : Union[str, Any] = init_xavier_std
_a : List[Any] = encoder_layerdrop
_a : List[Any] = decoder_layerdrop
_a : Dict = encoder_layers
_a : List[Any] = auxiliary_loss
_a : Optional[int] = position_embedding_type
_a : List[Any] = backbone
_a : Optional[int] = use_pretrained_backbone
_a : Optional[int] = dilation
# Hungarian matcher
_a : Tuple = class_cost
_a : str = bbox_cost
_a : Any = giou_cost
# Loss coefficients
_a : Tuple = mask_loss_coefficient
_a : Dict = dice_loss_coefficient
_a : Tuple = cls_loss_coefficient
_a : Any = bbox_loss_coefficient
_a : Dict = giou_loss_coefficient
_a : Union[str, Any] = focal_alpha
super().__init__(is_encoder_decoder=_UpperCAmelCase ,**_UpperCAmelCase )
@property
def __lowercase ( self : Dict ):
return self.encoder_attention_heads
@property
def __lowercase ( self : str ):
return self.d_model
def __lowercase ( self : int ):
_a : List[str] = copy.deepcopy(self.__dict__ )
if self.backbone_config is not None:
_a : Dict = self.backbone_config.to_dict()
_a : Union[str, Any] = self.__class__.model_type
return output
class __magic_name__ ( _UpperCamelCase ):
lowerCAmelCase : str = version.parse('1.11' )
@property
def __lowercase ( self : Dict ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
('pixel_mask', {0: 'batch'}),
] )
@property
def __lowercase ( self : Any ):
return 1E-5
@property
def __lowercase ( self : List[Any] ):
return 12
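# A usage sketch: building the config and reading the aliased properties
# defined above (values follow the defaults in __init__).
#
#   cfg = ConditionalDetrConfig()
#   cfg.hidden_size            # -> 256, alias of d_model
#   cfg.num_attention_heads    # -> 8, alias of encoder_attention_heads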
| 89 | 0 |
'''simple docstring'''
def matching_min_vertex_cover(graph: dict) -> set:
    """APX algorithm: build a vertex cover from an arbitrary maximal matching."""
    chosen_vertices = set()
    # edges = set of graph's edges
    edges = get_edges(graph)
    # While there are still elements in edges list, take an arbitrary edge
    # (from_node, to_node) and add his extremity to chosen_vertices and then
    # remove all arcs adjacent to the from_node and to_node
    while edges:
        from_node, to_node = edges.pop()
        chosen_vertices.add(from_node)
        chosen_vertices.add(to_node)
        for edge in edges.copy():
            if from_node in edge or to_node in edge:
                edges.discard(edge)
    return chosen_vertices


def get_edges(graph: dict) -> set:
    """Return the set of (from_node, to_node) pairs in the adjacency map."""
    edges = set()
    for from_node, to_nodes in graph.items():
        for to_node in to_nodes:
            edges.add((from_node, to_node))
    return edges
if __name__ == "__main__":
import doctest
doctest.testmod()
# graph = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
# print(f"Matching vertex cover:\n{matching_min_vertex_cover(graph)}")
| 311 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __magic_name__ :
def __init__( self : List[str] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : List[str]=13 ,_UpperCAmelCase : Any=32 ,_UpperCAmelCase : Union[str, Any]=3 ,_UpperCAmelCase : Optional[int]=4 ,_UpperCAmelCase : Optional[Any]=[10, 20, 30, 40] ,_UpperCAmelCase : Tuple=[2, 2, 3, 2] ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Optional[int]=True ,_UpperCAmelCase : Union[str, Any]=37 ,_UpperCAmelCase : Optional[int]="gelu" ,_UpperCAmelCase : Optional[Any]=10 ,_UpperCAmelCase : Tuple=0.02 ,_UpperCAmelCase : Any=["stage2", "stage3", "stage4"] ,_UpperCAmelCase : Any=[2, 3, 4] ,_UpperCAmelCase : Tuple=None ,):
_a : Optional[Any] = parent
_a : List[Any] = batch_size
_a : str = image_size
_a : Union[str, Any] = num_channels
_a : List[Any] = num_stages
_a : Dict = hidden_sizes
_a : int = depths
_a : Tuple = is_training
_a : List[str] = use_labels
_a : Dict = intermediate_size
_a : int = hidden_act
_a : int = num_labels
_a : Any = initializer_range
_a : Tuple = out_features
_a : int = out_indices
_a : List[Any] = scope
def __lowercase ( self : Dict ):
_a : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_a : Union[str, Any] = None
if self.use_labels:
_a : Tuple = ids_tensor([self.batch_size] ,self.num_labels )
_a : str = self.get_config()
return config, pixel_values, labels
def __lowercase ( self : Any ):
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=_UpperCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Any ,_UpperCAmelCase : Any ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[Any] = ConvNextVaModel(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Any = model(_UpperCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def __lowercase ( self : Tuple ,_UpperCAmelCase : Union[str, Any] ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : int ):
_a : List[Any] = ConvNextVaForImageClassification(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase ,labels=_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def __lowercase ( self : str ,_UpperCAmelCase : List[Any] ,_UpperCAmelCase : str ,_UpperCAmelCase : Optional[Any] ):
_a : Optional[int] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : Dict = model(_UpperCAmelCase )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
_a : Tuple = None
_a : List[Any] = ConvNextVaBackbone(config=_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
_a : List[str] = model(_UpperCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def __lowercase ( self : Optional[Any] ):
_a : Any = self.prepare_config_and_inputs()
_a , _a , _a : Union[str, Any] = config_and_inputs
_a : Any = {'pixel_values': pixel_values}
return config, inputs_dict
def __lowercase ( self : str ):
_a : Tuple = self.prepare_config_and_inputs()
_a , _a , _a : Tuple = config_and_inputs
_a : List[Any] = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class __magic_name__ ( _UpperCamelCase , _UpperCamelCase , unittest.TestCase ):
lowerCAmelCase : str = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
lowerCAmelCase : str = (
{'feature-extraction': ConvNextVaModel, 'image-classification': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase : int = False
lowerCAmelCase : str = False
lowerCAmelCase : Optional[Any] = False
lowerCAmelCase : List[str] = False
lowerCAmelCase : Optional[int] = False
def __lowercase ( self : List[Any] ):
_a : str = ConvNextVaModelTester(self )
_a : Tuple = ConfigTester(self ,config_class=_UpperCAmelCase ,has_text_modality=_UpperCAmelCase ,hidden_size=37 )
def __lowercase ( self : Optional[Any] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowercase ( self : str ):
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def __lowercase ( self : List[Any] ):
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def __lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def __lowercase ( self : Any ):
pass
def __lowercase ( self : List[str] ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Any = True
if model_class.__name__ in [
*get_values(_UpperCAmelCase ),
*get_values(_UpperCAmelCase ),
]:
continue
_a : Optional[Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.train()
_a : str = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : Optional[int] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : str ):
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
_a , _a : Optional[Any] = self.model_tester.prepare_config_and_inputs_with_labels()
_a : Optional[int] = False
_a : Tuple = True
if (
model_class.__name__
in [*get_values(_UpperCAmelCase ), *get_values(_UpperCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
_a : Tuple = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.gradient_checkpointing_enable()
model.train()
_a : Any = self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ,return_labels=_UpperCAmelCase )
_a : List[Any] = model(**_UpperCAmelCase ).loss
loss.backward()
def __lowercase ( self : List[Any] ):
_a , _a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = model_class(_UpperCAmelCase )
_a : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_a : Dict = [*signature.parameters.keys()]
_a : int = ['pixel_values']
self.assertListEqual(arg_names[:1] ,_UpperCAmelCase )
def __lowercase ( self : int ):
_a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def __lowercase ( self : Any ):
def check_hidden_states_output(_UpperCAmelCase : List[Any] ,_UpperCAmelCase : Tuple ,_UpperCAmelCase : Dict ):
_a : Union[str, Any] = model_class(_UpperCAmelCase )
model.to(_UpperCAmelCase )
model.eval()
with torch.no_grad():
_a : List[Any] = model(**self._prepare_for_class(_UpperCAmelCase ,_UpperCAmelCase ) )
_a : Any = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
_a : str = self.model_tester.num_stages
self.assertEqual(len(_UpperCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
_a , _a : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_a : int = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_a : Optional[Any] = True
check_hidden_states_output(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase )
def __lowercase ( self : List[Any] ):
_a : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_UpperCAmelCase )
@slow
def __lowercase ( self : int ):
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_a : Any = ConvNextVaModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
def __lowerCamelCase ( ) -> List[Any]:
_a : List[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class __magic_name__ ( unittest.TestCase ):
@cached_property
def __lowercase ( self : Optional[Any] ):
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def __lowercase ( self : Any ):
_a : List[str] = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(_UpperCAmelCase )
_a : Optional[int] = self.default_image_processor
_a : str = prepare_img()
_a : str = preprocessor(images=_UpperCAmelCase ,return_tensors='pt' ).to(_UpperCAmelCase )
# forward pass
with torch.no_grad():
_a : Dict = model(**_UpperCAmelCase )
# verify the logits
_a : Optional[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,_UpperCAmelCase )
_a : Optional[Any] = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(_UpperCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,_UpperCAmelCase ,atol=1E-4 ) )
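# An inference sketch outside the test harness, using the same checkpoint the
# integration test above exercises.
#
#   processor = AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   model = ConvNextVaForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224")
#   logits = model(**processor(images=prepare_img(), return_tensors="pt")).logits
#   model.config.id2label[logits.argmax(-1).item()]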
| 89 | 0 |