import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {'input_ids': [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
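
# A standalone sketch of the slow/fast parity check these tests automate. It is
# not part of the original test file and assumes network access to the public
# `camembert-base` checkpoint on the Hugging Face Hub.
if __name__ == "__main__":
    slow_tok = CamembertTokenizer.from_pretrained("camembert-base")
    fast_tok = CamembertTokenizerFast.from_pretrained("camembert-base")
    sample = "J'aime le camembert !"
    # the slow (SentencePiece) and fast (tokenizers) implementations must agree
    assert slow_tok.encode(sample) == fast_tok.encode(sample)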
from typing import List, Optional, Union
import numpy as np
import torch
import torchaudio.compliance.kaldi as ta_kaldi
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
logger = logging.get_logger(__name__)


class Speech2TextFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs a Speech2Text feature extractor: extracts mel filter-bank features from
    raw speech and optionally applies utterance-level cepstral mean and variance
    normalization (CMVN).
    """

    model_input_names = ["input_features", "attention_mask"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        num_mel_bins=80,
        padding_value=0.0,
        do_ceptral_normalize=True,
        normalize_means=True,
        normalize_vars=True,
        **kwargs,
    ):
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.num_mel_bins = num_mel_bins
        self.do_ceptral_normalize = do_ceptral_normalize
        self.normalize_means = normalize_means
        self.normalize_vars = normalize_vars
        self.return_attention_mask = True

    def _extract_fbank_features(
        self,
        waveform: np.ndarray,
    ) -> np.ndarray:
        waveform = waveform * (2**15)  # Kaldi compliance: 16-bit signed integers
        waveform = torch.from_numpy(waveform).unsqueeze(0)
        features = ta_kaldi.fbank(waveform, num_mel_bins=self.num_mel_bins, sample_frequency=self.sampling_rate)
        return features.numpy()

    @staticmethod
    def utterance_cmvn(
        x: np.ndarray,
        input_length: int,
        normalize_means: Optional[bool] = True,
        normalize_vars: Optional[bool] = True,
        padding_value: float = 0.0,
    ) -> np.ndarray:
        # make sure we normalize float32 arrays
        if normalize_means:
            mean = x[:input_length].mean(axis=0)
            x = np.subtract(x, mean)
        if normalize_vars:
            std = x[:input_length].std(axis=0)
            x = np.divide(x, std)

        if input_length < x.shape[0]:
            x[input_length:] = padding_value

        # make sure array is in float32
        x = x.astype(np.float32)

        return x

    def normalize(
        self, input_features: List[np.ndarray], attention_mask: Optional[np.ndarray] = None
    ) -> List[np.ndarray]:
        lengths = attention_mask.sum(-1) if attention_mask is not None else [x.shape[0] for x in input_features]
        return [
            self.utterance_cmvn(x, n, self.normalize_means, self.normalize_vars, self.padding_value)
            for x, n in zip(input_features, lengths)
        ]

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Union[bool, str, PaddingStrategy] = False,
        max_length: Optional[int] = None,
        truncation: bool = False,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchFeature:
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
                    f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
                    f" {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [raw_speech]

        # extract fbank features
        features = [self._extract_fbank_features(waveform) for waveform in raw_speech]

        # convert into correct format for padding
        encoded_inputs = BatchFeature({"input_features": features})

        padded_inputs = self.pad(
            encoded_inputs,
            padding=padding,
            max_length=max_length,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )

        # make sure list is in array format
        input_features = padded_inputs.get("input_features")
        if isinstance(input_features[0], list):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]

        attention_mask = padded_inputs.get("attention_mask")
        if attention_mask is not None:
            padded_inputs["attention_mask"] = [np.asarray(array, dtype=np.int32) for array in attention_mask]

        # Utterance-level cepstral mean and variance normalization
        if self.do_ceptral_normalize:
            attention_mask = (
                np.array(attention_mask, dtype=np.int32)
                if self._get_padding_strategies(padding, max_length=max_length) is not PaddingStrategy.DO_NOT_PAD
                else None
            )
            padded_inputs["input_features"] = self.normalize(
                padded_inputs["input_features"], attention_mask=attention_mask
            )

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs
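
# A minimal usage sketch for the feature extractor above; it is not part of the
# original module and assumes `torchaudio` is installed. A random waveform
# stands in for one second of 16 kHz speech.
if __name__ == "__main__":
    extractor = Speech2TextFeatureExtractor()
    dummy_waveform = np.random.uniform(-1.0, 1.0, size=16000).astype(np.float32)
    batch = extractor(dummy_waveform, sampling_rate=16000, padding=True, return_tensors="np")
    # one utterance -> (batch, frames, num_mel_bins)
    print(batch["input_features"].shape)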
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english

    def setUp(self):
        super().setUp()

        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])

    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()

        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"]
        )
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)

        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"]
        )

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])

        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"]
        )

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    def test_clean_text(self):
        tokenizer = self.get_tokenizer()

        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual([tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])

        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]]
            )

    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                sentence = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )

                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
                    [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "A"),
                        ((1, 2), ","),
                        ((3, 5), "na"),
                        ((5, 6), "##ï"),
                        ((6, 8), "##ve"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "Allen"),
                        ((21, 23), "##NL"),
                        ((23, 24), "##P"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                    if not do_lower_case
                    else [
                        ((0, 0), tokenizer_r.cls_token),
                        ((0, 1), "a"),
                        ((1, 2), ","),
                        ((3, 8), "naive"),
                        ((9, 15), tokenizer_r.mask_token),
                        ((16, 21), "allen"),
                        ((21, 23), "##nl"),
                        ((23, 24), "##p"),
                        ((25, 33), "sentence"),
                        ((33, 34), "."),
                        ((0, 0), tokenizer_r.sep_token),
                    ]
                )

                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])

    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})"):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)

                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)

                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)

                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    f"##{token}" if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)

        text = tokenizer.encode("你好", add_special_tokens=False)
        text_2 = tokenizer.encode("你是谁", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_2 + [2]

    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True
                )

                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)

                self.assertEqual(input_dict, prepared_input_dict)
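
# A short inference-time sketch (not part of the test file) showing what the
# three vocabulary files wired up in `setUp` provide; the file paths and the
# keyword-argument names below are assumptions based on VOCAB_FILES_NAMES.
if __name__ == "__main__":
    tok = RoCBertTokenizer(
        vocab_file="vocab.txt",
        word_shape_file="word_shape.json",
        word_pronunciation_file="word_pronunciation.json",
    )
    toks = tok.tokenize("你好")
    print(tok.convert_tokens_to_ids(toks))  # semantic ids
    print(tok.convert_tokens_to_shape_ids(toks))  # glyph-shape ids
    print(tok.convert_tokens_to_pronunciation_ids(toks))  # pronunciation ids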
import warnings
from ...utils import logging
from .image_processing_imagegpt import ImageGPTImageProcessor
logger = logging.get_logger(__name__)


class ImageGPTFeatureExtractor(ImageGPTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class ImageGPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use ImageGPTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
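
# A migration sketch (not from the original module): the shim above only
# forwards to the new class, so new code should instantiate the image
# processor directly and avoid the FutureWarning.
if __name__ == "__main__":
    image_processor = ImageGPTImageProcessor()  # preferred going forward
    legacy_extractor = ImageGPTFeatureExtractor()  # same behavior, but warns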
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def parse_args():
    """
    Helper function parsing the command line options.
    """
    parser = ArgumentParser(
        description=(
            "PyTorch TPU distributed training launch "
            "helper utility that will spawn up "
            "multiple distributed processes"
        )
    )

    # Optional arguments for the launch helper
    parser.add_argument("--num_cores", type=int, default=1, help="Number of TPU cores to use (1 or 8).")

    # positional
    parser.add_argument(
        "training_script",
        type=str,
        help=(
            "The full path to the single TPU training "
            "program/script to be launched in parallel, "
            "followed by all the arguments for the "
            "training script"
        ),
    )

    # rest from the training program
    parser.add_argument("training_script_args", nargs=REMAINDER)

    return parser.parse_args()


def main():
    args = parse_args()

    # Import training_script as a module.
    script_fpath = Path(args.training_script)
    sys.path.append(str(script_fpath.parent.resolve()))
    mod_name = script_fpath.stem
    mod = importlib.import_module(mod_name)

    # Patch sys.argv
    sys.argv = [args.training_script] + args.training_script_args + ["--tpu_num_cores", str(args.num_cores)]

    xmp.spawn(mod._mp_fn, args=(), nprocs=args.num_cores)


if __name__ == "__main__":
    main()
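# The launcher above imports the training script as a module and hands its
# `_mp_fn` to `xmp.spawn`. A minimal compatible script is sketched below as a
# comment; the file name and body are illustrative only.
#
#   # my_training_script.py
#   def _mp_fn(index):
#       # `index` is the per-process ordinal assigned by xmp.spawn
#       print(f"hello from TPU process {index}")
#
# Launched with:
#   python xla_spawn.py --num_cores 8 my_training_script.py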
import os
def solution(filename: str = "matrix.txt") -> int:
    """
    Find the minimal path sum from the top-left to the bottom-right corner of
    the grid in `filename`, moving only right and down (Project Euler 81).
    """
    with open(os.path.join(os.path.dirname(__file__), filename)) as in_file:
        data = in_file.read()

    grid = [[int(cell) for cell in row.split(",")] for row in data.strip().splitlines()]
    n = len(grid[0])
    dp = [[0 for i in range(n)] for j in range(n)]

    dp[0][0] = grid[0][0]
    for i in range(1, n):
        dp[0][i] = grid[0][i] + dp[0][i - 1]
    for i in range(1, n):
        dp[i][0] = grid[i][0] + dp[i - 1][0]

    for i in range(1, n):
        for j in range(1, n):
            dp[i][j] = grid[i][j] + min(dp[i - 1][j], dp[i][j - 1])

    return dp[-1][-1]


if __name__ == "__main__":
    print(f"{solution() = }")
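# A worked micro-example of the recurrence above (independent of matrix.txt),
# added for illustration. The minimal right/down path is 1 -> 3 -> 1 -> 1 -> 1,
# so dp[-1][-1] == 7:
#
#   grid = [[1, 3, 1],          dp = [[1, 4, 5],
#           [1, 5, 1],                [2, 7, 6],
#           [4, 2, 1]]                [6, 8, 7]]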
import random
import unittest
import torch
from diffusers import IFImg2ImgSuperResolutionPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device

from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin


@skip_mps
class IFImg2ImgSuperResolutionPipelineFastTests(PipelineTesterMixin, IFPipelineTesterMixin, unittest.TestCase):
    pipeline_class = IFImg2ImgSuperResolutionPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"width", "height"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"original_image"})
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}

    def get_dummy_components(self):
        return self._get_superresolution_dummy_components()

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        original_image = floats_tensor((1, 3, 16, 16), rng=random.Random(seed)).to(device)

        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "original_image": original_image,
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }

        return inputs

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1e-3)

    def test_save_load_optional_components(self):
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != "cuda", reason="float16 requires CUDA")
    def test_save_load_float16(self):
        super().test_save_load_float16(expected_max_diff=1e-1)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(expected_max_diff=1e-2)

    def test_save_load_local(self):
        self._test_save_load_local()

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(
            expected_max_diff=1e-2,
        )
from timeit import timeit
test_data = {
    "MALAYALAM": True,
    "String": False,
    "rotor": True,
    "level": True,
    "A": True,
    "BB": True,
    "ABC": False,
    "amanaplanacanalpanama": True,  # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())


def is_palindrome(s: str) -> bool:
    """Compare characters from both ends, moving two pointers inwards."""
    start_i = 0
    end_i = len(s) - 1
    while start_i < end_i:
        if s[start_i] == s[end_i]:
            start_i += 1
            end_i -= 1
        else:
            return False
    return True


def is_palindrome_traversal(s: str) -> bool:
    end = len(s) // 2
    n = len(s)
    # We need to traverse till half of the length of string
    # as we can get access of the i'th last element from
    # i'th index.
    # eg: [0,1,2,3,4,5] => 4th index can be accessed
    # with the help of 1st index (i==n-i-1)
    # where n is length of string
    return all(s[i] == s[n - i - 1] for i in range(end))


def is_palindrome_recursive(s: str) -> bool:
    if len(s) <= 2:
        return True
    if s[0] == s[len(s) - 1]:
        return is_palindrome_recursive(s[1:-1])
    else:
        return False


def is_palindrome_slice(s: str) -> bool:
    return s == s[::-1]


def benchmark_function(name: str) -> None:
    stmt = f"all({name}(key) is value for key, value in test_data.items())"
    setup = f"from __main__ import test_data, {name}"
    number = 500000
    result = timeit(stmt=stmt, setup=setup, number=number)
    print(f"{name:<35} finished {number:,} runs in {result:.5f} seconds")


if __name__ == "__main__":
    for key, value in test_data.items():
        assert is_palindrome(key) is is_palindrome_recursive(key)
        assert is_palindrome(key) is is_palindrome_slice(key)
        print(f"{key:21} {value}")
    print("a man a plan a canal panama")

    # finished 500,000 runs in 0.46793 seconds
    benchmark_function("is_palindrome_slice")
    # finished 500,000 runs in 0.85234 seconds
    benchmark_function("is_palindrome")
    # finished 500,000 runs in 1.32028 seconds
    benchmark_function("is_palindrome_recursive")
    # finished 500,000 runs in 2.08679 seconds
    benchmark_function("is_palindrome_traversal")
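# Rough intuition for the timings recorded above (a sketch, not from the
# original file): the slice variant runs entirely in C, the loop/all()
# variants pay per-character Python overhead, and recursion adds a call plus
# a string copy per character pair. A sanity check of all four on one input:
#
#   assert all(f("racecar") for f in
#              (is_palindrome, is_palindrome_traversal,
#               is_palindrome_recursive, is_palindrome_slice))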
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate(*args, take_from: Optional[Union[Dict, Any]] = None, standard_warn=True, stacklevel=2):
    from .. import __version__

    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0], tuple):
        args = (args,)

    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__).base_version) >= version.parse(version_name):
            raise ValueError(
                f"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
                f" version {__version__} is >= {version_name}"
            )

        warning = None
        if isinstance(deprecated_kwargs, dict) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute),)
            warning = f"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
        elif hasattr(deprecated_kwargs, attribute):
            values += (getattr(deprecated_kwargs, attribute),)
            warning = f"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
        elif deprecated_kwargs is None:
            warning = f"`{attribute}` is deprecated and will be removed in version {version_name}."

        if warning is not None:
            warning = warning + " " if standard_warn else ""
            warnings.warn(warning + message, FutureWarning, stacklevel=stacklevel)

    if isinstance(deprecated_kwargs, dict) and len(deprecated_kwargs) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe())[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key, value = next(iter(deprecated_kwargs.items()))
        raise TypeError(f"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`")

    if len(values) == 0:
        return
    elif len(values) == 1:
        return values[0]

    return values
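# A usage sketch for `deprecate` (not from the original module; the function
# name and version string are illustrative). Deprecating a keyword pops it
# from the caller's kwargs, returns its value, and emits a FutureWarning:
#
#   def resize(image, size=None, **kwargs):
#       scale = deprecate("scale", "2.0.0", "Use `size` instead.", take_from=kwargs)
#       ...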
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)


class PoolFormerImageProcessor(BaseImageProcessor):
    r"""
    Constructs a PoolFormer image processor: a crop-percentage-aware resize,
    center crop, rescale and normalization.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        crop_pct: int = 0.9,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        rescale_factor: Union[int, float] = 1 / 255,
        do_rescale: bool = True,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.crop_pct = crop_pct
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_DEFAULT_STD

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        crop_pct: Optional[float] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size and ("height" not in size or "width" not in size):
            raise ValueError(f"size must contain 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}")

        if crop_pct is not None:
            if "shortest_edge" in size:
                scale_size = int(size["shortest_edge"] / crop_pct)
            elif "height" in size and "width" in size:
                if size["height"] == size["width"]:
                    scale_size = int(size["height"] / crop_pct)
                else:
                    scale_size = (int(size["height"] / crop_pct), int(size["width"] / crop_pct))
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

            output_size = get_resize_output_image_size(image, size=scale_size, default_to_square=False)
        else:
            if "shortest_edge" in size:
                output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
            elif "height" in size and "width" in size:
                output_size = (size["height"], size["width"])
            else:
                raise ValueError("Invalid size for resize: {}".format(size))

        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"size must contain 'height' and 'width' as keys. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: int = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std

        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and size is None or resample is None:
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_center_crop and crop_pct is None:
            raise ValueError("Crop_pct must be specified if do_center_crop is True.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
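
# A usage sketch for the processor above (not part of the original module).
# With the defaults (shortest_edge=224, crop_pct=0.9) an image is first
# resized so its short side is int(224 / 0.9) == 248, then center-cropped to
# 224 x 224.
if __name__ == "__main__":
    processor = PoolFormerImageProcessor()
    dummy_image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)
    outputs = processor.preprocess(dummy_image, return_tensors="np")
    print(outputs["pixel_values"].shape)  # (1, 3, 224, 224)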
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1):
    """Create a schedule with a constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1):
    """Constant learning rate preceded by a linear warmup over `num_warmup_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1):
    """Piecewise constant multipliers parsed from rules like "1:10,0.1:20,0.01":
    a multiple of 1 until step 10, 0.1 until step 20, and 0.01 afterwards."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    """Linear warmup over `num_warmup_steps`, then linear decay to 0 at `num_training_steps`."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
):
    """Linear warmup followed by a cosine decay with `num_cycles` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
):
    """Linear warmup followed by a polynomial decay from the optimizer's initial lr to `lr_end`."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
):
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
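
# A usage sketch for `get_scheduler` (not part of the original module; the
# optimizer and step counts are illustrative): linear warmup for 100 steps,
# then linear decay to zero at step 1000.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    optimizer = torch.optim.AdamW(params, lr=1e-4)
    lr_scheduler = get_scheduler("linear", optimizer, num_warmup_steps=100, num_training_steps=1000)
    for _ in range(10):
        optimizer.step()
        lr_scheduler.step()
    print(lr_scheduler.get_last_lr())  # ~[1e-5] after 10 of 100 warmup steps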
#
# This a `torch.distributed` diagnostics script that checks that all GPUs in the cluster (one or
# many nodes) can talk to each other via nccl and allocate gpu memory.
#
# To run first adjust the number of processes and nodes:
#
# python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# You may need to add --master_addr $MASTER_ADDR --master_port $MASTER_PORT if using a custom addr:port
#
# You can also use the rdzv API: --rdzv_endpoint $MASTER_ADDR:$MASTER_PORT --rdzv_backend c10d
#
# use torch.distributed.launch instead of torch.distributed.run for torch < 1.9
#
# If you get a hanging in `barrier` calls you have some network issues, you may try to debug this with:
#
# NCCL_DEBUG=INFO python -m torch.distributed.run --nproc_per_node 2 --nnodes 1 torch-distributed-gpu-test.py
#
# which should tell you what's going on behind the scenes.
#
#
# This script can be run via `srun` in the SLURM environment as well. Here is a SLURM script that
# runs on 2 nodes of 4 gpus per node:
#
# #SBATCH --job-name=test-nodes # name
# #SBATCH --nodes=2 # nodes
# #SBATCH --ntasks-per-node=1 # crucial - only 1 task per dist per node!
# #SBATCH --cpus-per-task=10 # number of cores per tasks
# #SBATCH --gres=gpu:4 # number of gpus
# #SBATCH --time 0:05:00 # maximum execution time (HH:MM:SS)
# #SBATCH --output=%x-%j.out # output file name
#
# GPUS_PER_NODE=4
# MASTER_ADDR=$(scontrol show hostnames $SLURM_JOB_NODELIST | head -n 1)
# MASTER_PORT=6000
#
# srun --jobid $SLURM_JOBID bash -c 'python -m torch.distributed.run \
# --nproc_per_node $GPUS_PER_NODE --nnodes $SLURM_NNODES --node_rank $SLURM_PROCID \
# --master_addr $MASTER_ADDR --master_port $MASTER_PORT \
# torch-distributed-gpu-test.py'
#
import fcntl
import os
import socket
import torch
import torch.distributed as dist
def printflock(*msgs):
    """Print under an exclusive file lock so that output from concurrent
    processes does not interleave."""
    with open(__file__, "r") as fh:
        fcntl.flock(fh, fcntl.LOCK_EX)
        try:
            print(*msgs)
        finally:
            fcntl.flock(fh, fcntl.LOCK_UN)


local_rank = int(os.environ["LOCAL_RANK"])
torch.cuda.set_device(local_rank)
device = torch.device("cuda", local_rank)
hostname = socket.gethostname()

gpu = f"[{hostname}-{local_rank}]"

try:
    # test distributed
    dist.init_process_group("nccl")
    dist.all_reduce(torch.ones(1).to(device), op=dist.ReduceOp.SUM)
    dist.barrier()

    # test cuda is available and can allocate memory
    torch.cuda.is_available()
    torch.ones(1).cuda(local_rank)

    # global rank
    rank = dist.get_rank()
    world_size = dist.get_world_size()

    printflock(f"{gpu} is OK (global rank: {rank}/{world_size})")

    dist.barrier()
    if rank == 0:
        printflock(f"pt={torch.__version__}, cuda={torch.version.cuda}, nccl={torch.cuda.nccl.version()}")

except Exception:
    printflock(f"{gpu} is broken")
    raise
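
# If the all_reduce above succeeds, every rank contributed 1.0, so the reduced
# tensor equals the world size on each rank. A stricter variant of the check
# (a sketch, not in the original script) would be:
#
#   t = torch.ones(1).to(device)
#   dist.all_reduce(t, op=dist.ReduceOp.SUM)
#   assert t.item() == world_size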
def twos_complement(number: int) -> str:
    """
    Take in a negative integer `number` and return its two's complement
    representation as a binary string.

    >>> twos_complement(0)
    '0b0'
    >>> twos_complement(-1)
    '0b11'
    >>> twos_complement(-5)
    '0b1011'
    """
    if number > 0:
        raise ValueError("input must be a negative integer")
    binary_number_length = len(bin(number)[3:])
    twos_complement_number = bin(abs(number) - (1 << binary_number_length))[3:]
    twos_complement_number = (
        (
            "1"
            + "0" * (binary_number_length - len(twos_complement_number))
            + twos_complement_number
        )
        if number < 0
        else "0"
    )
    return "0b" + twos_complement_number


if __name__ == "__main__":
    import doctest

    doctest.testmod()
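# A worked example of the construction above: for number = -5, bin(-5) is
# "-0b101", so binary_number_length == 3; abs(-5) - (1 << 3) == -3, whose
# magnitude bits "11" are zero-padded to width 3 as "011" and prefixed with
# the sign bit "1", giving twos_complement(-5) == "0b1011" (the 4-bit two's
# complement of -5).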
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
SEWConfig,
SEWForCTC,
SEWModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_UpperCAmelCase = logging.get_logger(__name__)
_UpperCAmelCase = {
"""post_extract_proj""": """feature_projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.upsample.0""": """encoder.upsample.projection""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def UpperCamelCase ( __lowercase : int ,__lowercase : List[str] ,__lowercase : str ,__lowercase : Optional[Any] ,__lowercase : Any ):
'''simple docstring'''
for attribute in key.split('.' ):
A_ : Dict = getattr(__lowercase ,__lowercase )
if weight_type is not None:
A_ : Any = getattr(__lowercase ,__lowercase ).shape
else:
A_ : Optional[Any] = hf_pointer.shape
assert hf_shape == value.shape, (
f'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
f''' {value.shape} for {full_name}'''
)
if weight_type == "weight":
A_ : int = value
elif weight_type == "weight_g":
A_ : Tuple = value
elif weight_type == "weight_v":
A_ : Union[str, Any] = value
elif weight_type == "bias":
A_ : Any = value
else:
A_ : str = value
logger.info(f'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def UpperCamelCase ( __lowercase : str ,__lowercase : Dict ,__lowercase : Tuple ):
'''simple docstring'''
A_ : Optional[Any] = []
A_ : Tuple = fairseq_model.state_dict()
A_ : Any = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A_ : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,hf_model.config.feat_extract_norm == 'group' ,)
A_ : List[str] = True
else:
for key, mapped_key in MAPPING.items():
A_ : str = 'sew.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
A_ : int = True
if "*" in mapped_key:
A_ : str = name.split(__lowercase )[0].split('.' )[-2]
A_ : Optional[Any] = mapped_key.replace('*' ,__lowercase )
if "weight_g" in name:
A_ : Dict = 'weight_g'
elif "weight_v" in name:
A_ : Tuple = 'weight_v'
elif "weight" in name:
A_ : Union[str, Any] = 'weight'
elif "bias" in name:
A_ : Optional[Any] = 'bias'
else:
A_ : Union[str, Any] = None
set_recursively(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
continue
if not is_used:
unused_weights.append(__lowercase )
logger.warning(f'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Load a single fairseq conv-layer tensor into the HF feature extractor."""
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    """Build a SEWConfig from the fairseq model's config."""
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak a fairseq SEW checkpoint's weights into the Transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)
    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--is_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
_UpperCAmelCase = parser.parse_args()
convert_sew_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
)
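# Illustrative invocation (the script name and file paths below are placeholders,
# not files that ship with this snippet):
#
#   python convert_sew_checkpoint.py \
#       --checkpoint_path ./sew_small_100k.pt \
#       --pytorch_dump_folder_path ./sew-small-converted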
| 558 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {"configuration_unispeech": ["UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP", "UniSpeechConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_unispeech"] = [
'UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST',
'UniSpeechForCTC',
'UniSpeechForPreTraining',
'UniSpeechForSequenceClassification',
'UniSpeechModel',
'UniSpeechPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_unispeech import UNISPEECH_PRETRAINED_CONFIG_ARCHIVE_MAP, UniSpeechConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_unispeech import (
UNISPEECH_PRETRAINED_MODEL_ARCHIVE_LIST,
UniSpeechForCTC,
UniSpeechForPreTraining,
UniSpeechForSequenceClassification,
UniSpeechModel,
UniSpeechPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
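# Usage sketch (illustrative): the lazy module defers the torch-backed imports
# declared above until an attribute is first accessed, so importing transformers
# stays cheap when torch is absent:
#
#   from transformers import UniSpeechConfig, UniSpeechModel  # resolved lazily
#   model = UniSpeechModel(UniSpeechConfig())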
| 712 |
"""Convert an original WavLM checkpoint from microsoft/unilm into a Transformers WavLM model."""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
'post_extract_proj': 'feature_projection.projection',
'encoder.pos_conv.0': 'encoder.pos_conv_embed.conv',
'self_attn.k_proj': 'encoder.layers.*.attention.k_proj',
'self_attn.v_proj': 'encoder.layers.*.attention.v_proj',
'self_attn.q_proj': 'encoder.layers.*.attention.q_proj',
'self_attn.out_proj': 'encoder.layers.*.attention.out_proj',
'self_attn.grep_linear': 'encoder.layers.*.attention.gru_rel_pos_linear',
'self_attn.relative_attention_bias': 'encoder.layers.*.attention.rel_attn_embed',
'self_attn.grep_a': 'encoder.layers.*.attention.gru_rel_pos_const',
'self_attn_layer_norm': 'encoder.layers.*.layer_norm',
'fc1': 'encoder.layers.*.feed_forward.intermediate_dense',
'fc2': 'encoder.layers.*.feed_forward.output_dense',
'final_layer_norm': 'encoder.layers.*.final_layer_norm',
'encoder.layer_norm': 'encoder.layer_norm',
'w2v_model.layer_norm': 'feature_projection.layer_norm',
'quantizer.weight_proj': 'quantizer.weight_proj',
'quantizer.vars': 'quantizer.codevectors',
'project_q': 'project_q',
'final_proj': 'project_hid',
'w2v_encoder.proj': 'ctc_proj',
'mask_emb': 'masked_spec_embed',
}
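# The "*" in the mapped keys above is a per-layer wildcard: recursively_load_weights
# below extracts the layer index from the fairseq parameter name and substitutes it
# in. Worked example (the parameter name here is made up for illustration):
#
#   name = "encoder.layers.11.self_attn.k_proj.weight"
#   key = "self_attn.k_proj"                           # matches MAPPING
#   layer_index = name.split(key)[0].split(".")[-2]    # -> "11"
#   "encoder.layers.*.attention.k_proj".replace("*", layer_index)
#   # -> "encoder.layers.11.attention.k_proj"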
TOP_LEVEL_KEYS = [
'ctc_proj',
'quantizer.weight_proj',
'quantizer.codevectors',
'project_q',
'project_hid',
]
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name and "relative_attention_bias" not in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()

    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()

    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__A : Optional[int] = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
__A : Dict = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
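# Illustrative invocation (script name and paths are placeholders):
#
#   python convert_wavlm_original_checkpoint.py \
#       --checkpoint_path ./WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted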
| 126 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize

    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "clusters"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "image_processor.json")
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip("ImageGPT requires clusters at initialization")
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset("hf-internal-testing/fixtures_image_utils", split="test")

    image1 = Image.open(dataset[4]["file"])
    image2 = Image.open(dataset[5]["file"])

    return [image1, image2]
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small")

        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))

        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors="pt")

        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))

        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
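# Background for the integration test above: ImageGPT turns an image into token ids
# by snapping each (normalized) pixel to its nearest color cluster. A minimal sketch
# of that assignment step -- not the ImageGPTImageProcessor implementation itself,
# just the idea -- assuming `pixels` is an (n, 3) array and `clusters` a (k, 3) array:
#
#   import numpy as np
#
#   def nearest_cluster(pixels, clusters):
#       d = np.sum((pixels[:, None, :] - clusters[None, :, :]) ** 2, axis=-1)  # (n, k)
#       return np.argmin(d, axis=1)  # one token id per pixel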
| 33 |
"""Calculate the Casimir force between two parallel plates from any two of
force, plate area, and plate separation (pass the unknown quantity as 0)."""
from __future__ import annotations

from math import pi

# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """Solve for whichever of force, area, or distance is passed as 0, using the
    parallel-plate Casimir formula F = (ℏ * c * π² * A) / (240 * d⁴)."""
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
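# Example (illustrative numbers): solve for the force between 4 m^2 plates 3 cm apart
# by passing force=0; the result works out to roughly 6.4e-21 N for these inputs.
#
#   casimir_force(force=0, area=4, distance=0.03)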
| 418 | 0 |
import argparse
import json
import logging
import os
import sys
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, get_gpu_count, slow
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-classification",
"language-modeling",
"summarization",
"token-classification",
"question-answering",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_clm_flax
import run_flax_glue
import run_flax_ner
import run_mlm_flax
import run_qa
import run_summarization_flax
    import run_t5_mlm_flax
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
    parser = argparse.ArgumentParser()
    parser.add_argument("-f")
    args = parser.parse_args()
    return args.f
def _lowerCAmelCase( __A , __A="eval" ):
UpperCAmelCase = os.path.join(__A , F"{split}_results.json" )
if os.path.exists(__A ):
with open(__A , "r" ) as f:
return json.load(__A )
raise ValueError(F"can't find {path}" )
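# get_results() just loads the metrics JSON the example scripts write; a hypothetical
# {output_dir}/eval_results.json might look like:
#
#   {"eval_accuracy": 0.78, "eval_loss": 0.45}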
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
class ExamplesTests(TestCasePlus):
    def test_run_glue(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_glue.py
            --model_name_or_path distilbert-base-uncased
            --output_dir {tmp_dir}
            --train_file ./tests/fixtures/tests_samples/MRPC/train.csv
            --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --learning_rate=1e-4
            --eval_steps=2
            --warmup_steps=2
            --seed=42
            --max_seq_length=128
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_glue.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)

    @slow
    def test_run_clm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_clm_flax.py
            --model_name_or_path distilgpt2
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --block_size 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_clm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 100)

    @slow
    def test_run_summarization(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_summarization.py
            --model_name_or_path t5-small
            --train_file tests/fixtures/tests_samples/xsum/sample.json
            --validation_file tests/fixtures/tests_samples/xsum/sample.json
            --test_file tests/fixtures/tests_samples/xsum/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=8
            --do_train
            --do_eval
            --do_predict
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            --predict_with_generate
            """.split()

        with patch.object(sys, "argv", testargs):
            run_summarization_flax.main()
            result = get_results(tmp_dir, split="test")
            self.assertGreaterEqual(result["test_rouge1"], 10)
            self.assertGreaterEqual(result["test_rouge2"], 2)
            self.assertGreaterEqual(result["test_rougeL"], 7)
            self.assertGreaterEqual(result["test_rougeLsum"], 7)

    @slow
    def test_run_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_mlm.py
            --model_name_or_path distilroberta-base
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --logging_steps 2 --eval_steps 2
            --do_train
            --do_eval
            --num_train_epochs=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertLess(result["eval_perplexity"], 42)

    @slow
    def test_run_t5_mlm(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_t5_mlm_flax.py
            --model_name_or_path t5-small
            --train_file ./tests/fixtures/sample_text.txt
            --validation_file ./tests/fixtures/sample_text.txt
            --do_train
            --do_eval
            --max_seq_length 128
            --per_device_train_batch_size 4
            --per_device_eval_batch_size 4
            --num_train_epochs 2
            --logging_steps 2 --eval_steps 2
            --output_dir {tmp_dir}
            --overwrite_output_dir
            """.split()

        with patch.object(sys, "argv", testargs):
            run_t5_mlm_flax.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.42)

    @slow
    def test_run_ner(self):
        # with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
        epochs = 7 if get_gpu_count() > 1 else 2

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_flax_ner.py
            --model_name_or_path bert-base-uncased
            --train_file tests/fixtures/tests_samples/conll/sample.json
            --validation_file tests/fixtures/tests_samples/conll/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --do_train
            --do_eval
            --warmup_steps=2
            --learning_rate=2e-4
            --logging_steps 2 --eval_steps 2
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=2
            --num_train_epochs={epochs}
            --seed 7
            """.split()

        with patch.object(sys, "argv", testargs):
            run_flax_ner.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            self.assertGreaterEqual(result["eval_f1"], 0.3)

    @slow
    def test_run_qa(self):
        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f"""
            run_qa.py
            --model_name_or_path bert-base-uncased
            --version_2_with_negative
            --train_file tests/fixtures/tests_samples/SQUAD/sample.json
            --validation_file tests/fixtures/tests_samples/SQUAD/sample.json
            --output_dir {tmp_dir}
            --overwrite_output_dir
            --num_train_epochs=3
            --warmup_steps=2
            --do_train
            --do_eval
            --logging_steps 2 --eval_steps 2
            --learning_rate=2e-4
            --per_device_train_batch_size=2
            --per_device_eval_batch_size=1
            """.split()

        with patch.object(sys, "argv", testargs):
            run_qa.main()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_f1"], 30)
            self.assertGreaterEqual(result["eval_exact"], 30)
| 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
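# Example (hypothetical filename): extract_label("images/beagle_32.jpg") -> "beagle"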
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
    # Initialize accelerator
    if args.with_tracking:
        accelerator = Accelerator(
            cpu=args.cpu, mixed_precision=args.mixed_precision, log_with="all", project_dir=args.project_dir
        )
    else:
        accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    image_size = config["image_size"]
    if not isinstance(image_size, (list, tuple)):
        image_size = (image_size, image_size)

    # Parse out whether we are saving every epoch or after a certain number of batches
    if hasattr(args.checkpointing_steps, "isdigit"):
        if args.checkpointing_steps == "epoch":
            checkpointing_steps = args.checkpointing_steps
        elif args.checkpointing_steps.isdigit():
            checkpointing_steps = int(args.checkpointing_steps)
        else:
            raise ValueError(
                f"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed."
            )
    else:
        checkpointing_steps = None

    # We need to initialize the trackers we use, and also store our configuration
    if args.with_tracking:
        run = os.path.split(__file__)[-1].split(".")[0]
        accelerator.init_trackers(run, config)

    # Grab all the image filenames
    file_names = [os.path.join(args.data_dir, fname) for fname in os.listdir(args.data_dir) if fname.endswith(".jpg")]

    # Build the label correspondences
    all_labels = [extract_label(fname) for fname in file_names]
    id_to_label = list(set(all_labels))
    id_to_label.sort()
    label_to_id = {lbl: i for i, lbl in enumerate(id_to_label)}

    # Set the seed before splitting the data.
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    # Split our filenames between train and validation
    random_perm = np.random.permutation(len(file_names))
    cut = int(0.8 * len(file_names))
    train_split = random_perm[:cut]
    eval_split = random_perm[cut:]

    # For training we use a simple RandomResizedCrop
    train_tfm = Compose([RandomResizedCrop(image_size, scale=(0.5, 1.0)), ToTensor()])
    train_dataset = PetsDataset(
        [file_names[i] for i in train_split], image_transform=train_tfm, label_to_id=label_to_id
    )

    # For evaluation, we use a deterministic Resize
    eval_tfm = Compose([Resize(image_size), ToTensor()])
    eval_dataset = PetsDataset([file_names[i] for i in eval_split], image_transform=eval_tfm, label_to_id=label_to_id)

    # Instantiate dataloaders.
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    eval_dataloader = DataLoader(eval_dataset, shuffle=False, batch_size=batch_size, num_workers=4)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = create_model("resnet50d", pretrained=True, num_classes=len(label_to_id))

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Freezing the base model
    for param in model.parameters():
        param.requires_grad = False
    for param in model.get_classifier().parameters():
        param.requires_grad = True

    # We normalize the batches of images to be a bit faster.
    mean = torch.tensor(model.default_cfg["mean"])[None, :, None, None].to(accelerator.device)
    std = torch.tensor(model.default_cfg["std"])[None, :, None, None].to(accelerator.device)

    # Instantiate optimizer
    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr / 25)

    # Instantiate learning rate scheduler
    lr_scheduler = OneCycleLR(optimizer=optimizer, max_lr=lr, epochs=num_epochs, steps_per_epoch=len(train_dataloader))

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Potentially load in the weights and states from a previous save
    if args.resume_from_checkpoint:
        if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
            accelerator.print(f"Resumed from checkpoint: {args.resume_from_checkpoint}")
            accelerator.load_state(args.resume_from_checkpoint)
            path = os.path.basename(args.resume_from_checkpoint)
        else:
            # Get the most recent checkpoint
            dirs = [f.name for f in os.scandir(os.getcwd()) if f.is_dir()]
            dirs.sort(key=os.path.getctime)
            path = dirs[-1]  # Sorts folders by date modified, most recent checkpoint is the last
        # Extract `epoch_{i}` or `step_{i}`
        training_difference = os.path.splitext(path)[0]

        if "epoch" in training_difference:
            starting_epoch = int(training_difference.replace("epoch_", "")) + 1
            resume_step = None
        else:
            resume_step = int(training_difference.replace("step_", ""))
            starting_epoch = resume_step // len(train_dataloader)
            resume_step -= starting_epoch * len(train_dataloader)

    # Now we train the model
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        if args.with_tracking:
            total_loss = 0
        if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
            # We need to skip steps until we reach the resumed step
            active_dataloader = accelerator.skip_first_batches(train_dataloader, resume_step)
            overall_step += resume_step
        else:
            # After the first iteration though, we need to go back to the original dataloader
            active_dataloader = train_dataloader
        for batch in active_dataloader:
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            outputs = model(inputs)
            loss = torch.nn.functional.cross_entropy(outputs, batch["label"])
            # We keep track of the loss at each epoch
            if args.with_tracking:
                total_loss += loss.detach().float()
            accelerator.backward(loss)
            optimizer.step()
            lr_scheduler.step()
            optimizer.zero_grad()
            overall_step += 1
            if isinstance(checkpointing_steps, int):
                output_dir = f"step_{overall_step}"
                if overall_step % checkpointing_steps == 0:
                    if args.output_dir is not None:
                        output_dir = os.path.join(args.output_dir, output_dir)
                    accelerator.save_state(output_dir)
        model.eval()
        accurate = 0
        num_elems = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch = {k: v.to(accelerator.device) for k, v in batch.items()}
            inputs = (batch["image"] - mean) / std
            with torch.no_grad():
                outputs = model(inputs)
            predictions = outputs.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["label"]))
            accurate_preds = predictions == references
            num_elems += accurate_preds.shape[0]
            accurate += accurate_preds.long().sum()

        eval_metric = accurate.item() / num_elems
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"epoch {epoch}: {100 * eval_metric:.2f}")
        if args.with_tracking:
            accelerator.log(
                {
                    "accuracy": 100 * eval_metric,
                    "train_loss": total_loss.item() / len(train_dataloader),
                    "epoch": epoch,
                },
                step=overall_step,
            )
        if checkpointing_steps == "epoch":
            output_dir = f"epoch_{epoch}"
            if args.output_dir is not None:
                output_dir = os.path.join(args.output_dir, output_dir)
            accelerator.save_state(output_dir)
    if args.with_tracking:
        accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument("--data_dir", required=True, help="The data folder on disk.")
    parser.add_argument("--fp16", action="store_true", help="If passed, will use FP16 training.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    parser.add_argument(
        "--checkpointing_steps",
        type=str,
        default=None,
        help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch.",
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--with_tracking",
        action="store_true",
        help="Whether to load in all available experiment trackers from the environment and use them for logging.",
    )
    parser.add_argument(
        "--project_dir",
        type=str,
        default="logs",
        help="Location on where to store experiment tracking logs and relevant project information",
    )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
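# Typical launch (illustrative script name and paths; assumes `accelerate config`
# was run beforehand):
#
#   accelerate launch cv_example.py --data_dir ./images --checkpointing_steps epoch --with_tracking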
| 1 | 1 |
def create_ngram(sentence: str, ngram_size: int) -> list[str]:
    """Create all n-grams (contiguous substrings of length `ngram_size`) from `sentence`."""
    return [sentence[i : i + ngram_size] for i in range(len(sentence) - ngram_size + 1)]
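# Example: create_ngram("abcde", 3) -> ['abc', 'bcd', 'cde']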
if __name__ == "__main__":
from doctest import testmod
testmod()
| 306 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
    from transformers import GPT2Tokenizer, TFOPTForCausalLM, TFOPTModel
def prepare_opt_inputs_dict(config, input_ids, attention_mask=None, head_mask=None):
    if attention_mask is None:
        attention_mask = tf.cast(tf.math.not_equal(input_ids, config.pad_token_id), tf.int8)
    return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class TFOPTModelTester:
    config_cls = OPTConfig
    config_updates = {}
    hidden_act = "gelu"
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
        embed_dim=16,
        word_embed_proj_dim=16,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
        self.embed_dim = embed_dim
        self.word_embed_proj_dim = word_embed_proj_dim
        self.is_encoder_decoder = False
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size)
        eos_tensor = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size), 1)
        input_ids = tf.concat([input_ids, eos_tensor], axis=1)

        config = self.config_cls(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            ffn_dim=self.intermediate_size,
            dropout=self.hidden_dropout_prob,
            attention_dropout=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.bos_token_id,
            pad_token_id=self.pad_token_id,
            embed_dim=self.embed_dim,
            word_embed_proj_dim=self.word_embed_proj_dim,
            is_encoder_decoder=False,
            **self.config_updates,
        )
        inputs_dict = prepare_opt_inputs_dict(config, input_ids)
        return config, inputs_dict
    def check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = TFOPTModel(config=config)
        input_ids = inputs_dict["input_ids"]

        input_ids = input_ids[:1, :]
        attention_mask = inputs_dict["attention_mask"][:1, :]
        self.batch_size = 1

        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, use_cache=True)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = tf.cast(ids_tensor((self.batch_size, 3), 2), tf.int8)

        # append to next input_ids and
        next_input_ids = tf.concat([input_ids, next_tokens], axis=-1)
        next_attention_mask = tf.concat([attention_mask, next_attn_mask], axis=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)[0]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[0]

        self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1])

        # select random slice
        random_slice_idx = int(ids_tensor((1,), output_from_past.shape[-1]))
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx]
        output_from_past_slice = output_from_past[:, :, random_slice_idx]

        # test that outputs are equal for slice
        tf.debugging.assert_near(output_from_no_past_slice, output_from_past_slice, rtol=1e-3)
@require_tf
class TFOPTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
    all_generative_model_classes = (TFOPTForCausalLM,) if is_tf_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": TFOPTModel, "text-generation": TFOPTForCausalLM} if is_tf_available() else {}
    )
    is_encoder_decoder = False
    test_pruning = False
    test_onnx = False
    onnx_min_opset = 10
    def setUp(self):
        self.model_tester = TFOPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=OPTConfig)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_decoder_model_past_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_resize_token_embeddings(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        def _get_word_embedding_weight(model, embedding_layer):
            if hasattr(embedding_layer, "weight"):
                return embedding_layer.weight
            else:
                # Here we build the word embeddings weights if not exists.
                # And then we retry to get the attribute once built.
                model.build()
                if hasattr(embedding_layer, "weight"):
                    return embedding_layer.weight
                else:
                    return None

        for model_class in self.all_model_classes:
            for size in [config.vocab_size - 10, config.vocab_size + 10]:
                # build the embeddings
                model = model_class(config=config)
                old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # reshape the embeddings
                model.resize_token_embeddings(size)
                new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
                new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())

                # check that the resized embeddings size matches the desired size.
                assert_size = size if size is not None else config.vocab_size

                self.assertEqual(new_input_embeddings.shape[0], assert_size)

                # check that weights remain the same after resizing
                models_equal = True
                for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
                    if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                        models_equal = False
                self.assertTrue(models_equal)

                if old_output_embeddings is not None and new_output_embeddings is not None:
                    self.assertEqual(new_output_embeddings.shape[0], assert_size)

                    models_equal = True
                    for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
                        if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
                            models_equal = False
                    self.assertTrue(models_equal)
def _long_tensor(tok_lst):
    return tf.constant(tok_lst, dtype=tf.int32)
@require_tf
class TFOPTHeadTests(unittest.TestCase):
    vocab_size = 99
    def _get_config_and_data(self):
        eos_column_vector = tf.ones((4, 1), dtype=tf.int32) * 2
        input_ids = tf.concat([ids_tensor((4, 6), self.vocab_size - 3) + 3, eos_column_vector], axis=1)
        batch_size = input_ids.shape[0]
        config = OPTConfig(
            vocab_size=self.vocab_size,
            hidden_size=24,
            num_hidden_layers=2,
            num_attention_heads=2,
            ffn_dim=32,
            max_position_embeddings=48,
            eos_token_id=2,
            pad_token_id=1,
            bos_token_id=0,
        )
        return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class TFOPTModelIntegrationTests(unittest.TestCase):
    @slow
    def test_inference_no_head(self):
        model = TFOPTModel.from_pretrained("facebook/opt-350m")
        input_ids = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]])
        attention_mask = tf.not_equal(input_ids, model.config.pad_token_id)
        with tf.GradientTape():
            output = model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        expected_shape = (1, 11, 512)
        self.assertEqual(output.shape, expected_shape)
        expected_slice = tf.constant(
            [[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]]
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-3))

        xla_generate = tf.function(model, jit_compile=True)
        output = xla_generate(input_ids, attention_mask)[0]
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=4e-2))
@require_tf
@slow
class TFOPTEmbeddingsTest(unittest.TestCase):
    def setUp(self):
        super().setUp()
        self.path_model = "facebook/opt-350m"
    def test_logits(self):
        model = TFOPTForCausalLM.from_pretrained(self.path_model)
        tokenizer = GPT2Tokenizer.from_pretrained(self.path_model)

        prompts = [
            "Today is a beautiful day and I want to",
            "In the city of",
            "Paris is the capital of France and",
            "Computers and mobile phones have taken",
        ]
        # verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
        inputs = tokenizer(prompts, return_tensors="tf", padding=True, add_special_tokens=False)
        logits = tf.math.reduce_mean(model(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        logits_meta = tf.constant(
            [
                [1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
                [-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
                [0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
                [6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
            ]
        )
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))

        xla_generate = tf.function(model, jit_compile=True)
        logits = tf.math.reduce_mean(xla_generate(inputs.input_ids, attention_mask=inputs.attention_mask)[0], axis=-1)
        self.assertTrue(np.allclose(logits, logits_meta, atol=1e-4))
@require_tf
@slow
class TFOPTGenerationTest(unittest.TestCase):
    @property
    def prompts(self):
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
    def test_generation_pre_attn_layer_norm(self):
        model_id = "facebook/opt-125m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of New York, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
    def test_batch_generation(self):
        model_id = "facebook/opt-350m"

        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        tokenizer.padding_side = "left"

        # use different length sentences to test batching
        sentences = [
            "Hello, my dog is a little",
            "Today, I",
        ]

        inputs = tokenizer(sentences, return_tensors="tf", padding=True)
        input_ids = inputs["input_ids"]

        outputs = model.generate(input_ids=input_ids, attention_mask=inputs["attention_mask"])

        inputs_non_padded = tokenizer(sentences[0], return_tensors="tf").input_ids
        output_non_padded = model.generate(input_ids=inputs_non_padded)

        num_paddings = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
            tf.cast(inputs["attention_mask"][-1], tf.int64)
        )
        inputs_padded = tokenizer(sentences[1], return_tensors="tf").input_ids
        output_padded = model.generate(input_ids=inputs_padded, max_length=model.config.max_length - num_paddings)

        batch_out_sentence = tokenizer.batch_decode(outputs, skip_special_tokens=True)
        non_padded_sentence = tokenizer.decode(output_non_padded[0], skip_special_tokens=True)
        padded_sentence = tokenizer.decode(output_padded[0], skip_special_tokens=True)

        expected_output_sentence = [
            "Hello, my dog is a little bit of a dork.\nI'm a little bit",
            "Today, I was in the middle of a conversation with a friend about the",
        ]
        self.assertListEqual(expected_output_sentence, batch_out_sentence)
        self.assertListEqual(expected_output_sentence, [non_padded_sentence, padded_sentence])
    def test_generation_post_attn_layer_norm(self):
        model_id = "facebook/opt-350m"

        EXPECTED_OUTPUTS = [
            "Today is a beautiful day and I want to",
            "In the city of San Francisco, the city",
            "Paris is the capital of France and the capital",
            "Computers and mobile phones have taken over the",
        ]

        predicted_outputs = []
        tokenizer = GPT2Tokenizer.from_pretrained(model_id)
        model = TFOPTForCausalLM.from_pretrained(model_id)

        for prompt in self.prompts:
            input_ids = tokenizer(prompt, return_tensors="tf").input_ids

            generated_ids = model.generate(input_ids, max_length=10)

            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string

        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
| 306 | 1 |
"""simple docstring"""
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    """Rename original SAM state-dict keys to the Transformers naming scheme."""
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
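# Worked example of the renaming above (key chosen for illustration):
#
#   "image_encoder.blocks.0.norm1.weight"
#   -> "vision_encoder.layers.0.layer_norm1.weight"
#   ("image_encoder" -> "vision_encoder", "blocks" -> "layers", ".norm" -> ".layer_norm")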
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024,
            num_hidden_layers=24,
            num_attention_heads=16,
            global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280,
            num_hidden_layers=32,
            num_attention_heads=16,
            global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")

    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1725, 850),)

        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")

        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()

        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
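# Note: the fragment above parses --pytorch_dump_folder_path and --push_to_hub
# but the visible body of convert_sam_checkpoint never consumes them. A hedged
# sketch of how HF conversion scripts typically honour those flags
# (hypothetical wiring, kept commented so the script's behaviour is unchanged):
#
#     if pytorch_dump_folder is not None:
#         processor.save_pretrained(pytorch_dump_folder)
#         hf_model.save_pretrained(pytorch_dump_folder)
#     if push_to_hub:
#         processor.push_to_hub(f"converted/{model_name}")  # hypothetical repo id
#         hf_model.push_to_hub(f"converted/{model_name}")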
| 706 |
"""simple docstring"""
import unittest
from transformers import AlbertTokenizer, AlbertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir('fixtures/spiece.model')
@require_sentencepiece
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase):
"""simple docstring"""
    tokenizer_class = AlbertTokenizer
    rust_tokenizer_class = AlbertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    test_sentencepiece_ignore_case = True
def a__ ( self: Dict )-> Optional[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self: Tuple , __a: Tuple )-> Union[str, Any]:
lowerCamelCase : List[str] = """this is a test"""
lowerCamelCase : int = """this is a test"""
return input_text, output_text
def a__ ( self: Any )-> List[Any]:
        token = """<pad>"""
        token_id = 0

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def a__ ( self: Tuple )-> str:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , """<pad>""" )
        self.assertEqual(vocab_keys[1] , """<unk>""" )
        self.assertEqual(vocab_keys[-1] , """▁eloquent""" )
        self.assertEqual(len(vocab_keys ) , 30_000 )
def a__ ( self: List[str] )-> Any:
self.assertEqual(self.get_tokenizer().vocab_size , 30_000 )
def a__ ( self: Optional[Any] )-> Union[str, Any]:
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = """I was born in 92000, and this is falsé."""

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
def a__ ( self: Tuple )-> List[Any]:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB , keep_accents=True )

        tokens = tokenizer.tokenize("""This is a test""" )
        self.assertListEqual(tokens , ["""▁this""", """▁is""", """▁a""", """▁test"""] )
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [48, 25, 21, 1_289] )

        tokens = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
        self.assertListEqual(
            tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """é""", """."""] )
        ids = tokenizer.convert_tokens_to_ids(tokens )
        self.assertListEqual(ids , [31, 23, 386, 19, 561, 3_050, 15, 17, 48, 25, 8_256, 18, 1, 9] )

        back_tokens = tokenizer.convert_ids_to_tokens(ids )
        self.assertListEqual(
            back_tokens , ["""▁i""", """▁was""", """▁born""", """▁in""", """▁9""", """2000""", """,""", """▁and""", """▁this""", """▁is""", """▁fal""", """s""", """<unk>""", """."""] , )
def a__ ( self: Tuple )-> str:
        tokenizer = AlbertTokenizer(SAMPLE_VOCAB )
        text = tokenizer.encode("""sequence builders""" )
        text_2 = tokenizer.encode("""multi-sequence build""" )

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text )
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text , text_2 )

        assert encoded_sentence == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id]
        assert encoded_pair == [tokenizer.cls_token_id] + text + [tokenizer.sep_token_id] + text_2 + [
            tokenizer.sep_token_id
        ]
@slow
def a__ ( self: Any )-> Dict:
# fmt: off
lowerCamelCase : Optional[Any] = {"""attention_mask""": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """input_ids""": [[2, 21_970, 13, 5, 6_092, 167, 28, 7_103, 2_153, 673, 8, 7_028, 12_051, 18, 17, 7_103, 2_153, 673, 8, 3_515, 18_684, 8, 4_461, 6, 1_927, 297, 8, 12_060, 2_607, 18, 13, 5, 4_461, 15, 10_538, 38, 8, 135, 15, 822, 58, 15, 993, 10_363, 15, 1_460, 8_005, 4_461, 15, 993, 255, 2_328, 9, 9, 9, 6, 26, 1_112, 816, 3_260, 13, 5, 103, 2_377, 6, 17, 1_112, 816, 2_782, 13, 5, 103, 10_641, 6, 29, 84, 2_512, 2_430, 782, 18_684, 2_761, 19, 808, 2_430, 2_556, 17, 855, 1_480, 9_477, 4_091, 128, 11_712, 15, 7_103, 2_153, 673, 17, 24_883, 9_990, 9, 3], [2, 11_502, 25, 1_006, 20, 782, 8, 11_809, 855, 1_732, 19_393, 18_667, 37, 367, 21_018, 69, 1_854, 34, 11_860, 19_124, 27, 156, 225, 17, 193, 4_141, 19, 65, 9_124, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [2, 14, 2_231, 886, 2_385, 17_659, 84, 14, 16_792, 1_952, 9, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], """token_type_ids""": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=lowerCamelCase , model_name="""albert-base-v2""" , revision="""6b6560eaf5ff2e250b00c50f380c5389a9c2d82e""" , )
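# A minimal, self-contained sketch of the slow/fast parity check the test class
# above automates: encode the same string with both tokenizers and compare.
# It uses the public albert-base-v2 checkpoint, so it needs network access and
# is therefore kept commented out.
#
#     from transformers import AlbertTokenizer, AlbertTokenizerFast
#
#     slow = AlbertTokenizer.from_pretrained("albert-base-v2")
#     fast = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#     s = "I was born in 92000, and this is falsé."
#     assert slow.encode(s) == fast.encode(s)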
| 42 | 0 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
    XLNetTokenizer = None

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}

SPIECE_UNDERLINE = '▁'

# Segments (not really needed)
SEG_ID_A = 0
SEG_ID_B = 1
SEG_ID_CLS = 2
SEG_ID_SEP = 3
SEG_ID_PAD = 4
class XLNetTokenizerFast(PreTrainedTokenizerFast):
    '''Fast XLNet tokenizer, backed by the tokenizers library.'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    padding_side = '''left'''
    slow_tokenizer_class = XLNetTokenizer
    def __init__( self , vocab_file=None , tokenizer_file=None , do_lower_case=False , remove_space=True , keep_accents=False , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , sep_token="<sep>" , pad_token="<pad>" , cls_token="<cls>" , mask_token="<mask>" , additional_special_tokens=["<eop>", "<eod>"] , **kwargs , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it.
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            vocab_file=vocab_file , tokenizer_file=tokenizer_file , do_lower_case=do_lower_case , remove_space=remove_space , keep_accents=keep_accents , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , pad_token=pad_token , cls_token=cls_token , mask_token=mask_token , additional_special_tokens=additional_special_tokens , **kwargs , )
        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True
    def build_inputs_with_special_tokens( self , token_ids_0 , token_ids_1=None ):
        """Build model inputs by appending ``sep`` and ``cls`` at the end, per XLNet's convention."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def create_token_type_ids_from_sequences( self , token_ids_0 , token_ids_1=None ):
        """Token type ids: 0 for the first segment, 1 for the second, 2 for the trailing ``cls``."""
        sep = [self.sep_token_id]
        cls_segment_id = [2]
        if token_ids_1 is None:
            return len(token_ids_0 + sep ) * [0] + cls_segment_id
        return len(token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1] + cls_segment_id
    def save_vocabulary( self , save_directory , filename_prefix=None ):
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                '''Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '''
                '''tokenizer.''' )
        if not os.path.isdir(save_directory ):
            logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        return (out_vocab_file,)
| 322 |
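# Unlike BERT-style models, XLNet puts its special tokens at the *end* of the
# sequence, which is exactly what build_inputs_with_special_tokens above
# encodes. A tiny sketch with placeholder ids (4 and 3 are illustrative
# values, not the real vocabulary ids):
def _demo_xlnet_special_token_layout():
    sep_id, cls_id = 4, 3
    token_ids_0 = [10, 11, 12]
    token_ids_1 = [20, 21]
    single = token_ids_0 + [sep_id] + [cls_id]
    pair = token_ids_0 + [sep_id] + token_ids_1 + [sep_id] + [cls_id]
    assert single[-1] == cls_id and pair[-1] == cls_id  # cls always comes last
    return single, pair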
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
_import_structure = {'configuration_speech_encoder_decoder': ['SpeechEncoderDecoderConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_speech_encoder_decoder'] = ['SpeechEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_speech_encoder_decoder'] = ['FlaxSpeechEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_speech_encoder_decoder import SpeechEncoderDecoderConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speech_encoder_decoder import SpeechEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_speech_encoder_decoder import FlaxSpeechEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 322 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    """configuration_convnext""": ["""CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ConvNextConfig""", """ConvNextOnnxConfig"""]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_convnext"""] = ["""ConvNextFeatureExtractor"""]
    _import_structure["""image_processing_convnext"""] = ["""ConvNextImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_convnext"""] = [
        """CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ConvNextForImageClassification""",
        """ConvNextModel""",
        """ConvNextPreTrainedModel""",
        """ConvNextBackbone""",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_convnext"""] = [
        """TFConvNextForImageClassification""",
        """TFConvNextModel""",
        """TFConvNextPreTrainedModel""",
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure)
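# The _import_structure dict above drives transformers' lazy-import pattern:
# nothing heavy is imported until an attribute is first accessed. A minimal
# sketch of the same idea with plain importlib (simplified, not the actual
# _LazyModule implementation):
def _demo_lazy_attribute(module_name, attribute):
    import importlib

    module = importlib.import_module(module_name)
    return getattr(module, attribute)
# e.g. _demo_lazy_attribute("math", "sqrt")(4.0) == 2.0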
| 705 |
"""simple docstring"""
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common, init_noise_sigma, timesteps):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps=10_00,
        beta_start=0.0001,
        beta_end=0.02,
        beta_schedule="linear",
        trained_betas=None,
        variance_type="fixed_small",
        clip_sample=True,
        prediction_type="epsilon",
        dtype=jnp.float32,
    ):
        self.dtype = dtype

    def create_state(self, common=None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )

    def scale_model_input(self, state, sample, timestep=None) -> jnp.ndarray:
        return sample

    def set_timesteps(self, state, num_inference_steps, shape=()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )

    def _get_variance(self, state, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1E-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1E-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state,
        model_output,
        timestep,
        sample,
        key=None,
        return_dict=True,
    ) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                " for the FlaxDDPMScheduler." )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state, original_samples, noise, timesteps) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state, sample, noise, timesteps) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
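# A numeric sketch of the quantities step() above combines (formulas (7) and
# (15) of https://arxiv.org/pdf/2006.11239.pdf), written in plain Python so it
# can be checked without flax. The beta schedule and timestep are arbitrary
# illustrative values.
def _demo_ddpm_step_coefficients():
    betas = [0.0001 + i * (0.02 - 0.0001) / 999 for i in range(1000)]  # linear schedule
    alphas = [1.0 - b for b in betas]
    alphas_cumprod = []
    acc = 1.0
    for a in alphas:
        acc *= a
        alphas_cumprod.append(acc)

    t = 500
    alpha_prod_t = alphas_cumprod[t]
    alpha_prod_t_prev = alphas_cumprod[t - 1]
    beta_prod_t = 1.0 - alpha_prod_t

    # coefficients of x_0 and x_t in the posterior mean (formula (7))
    coeff_x0 = (alpha_prod_t_prev ** 0.5 * betas[t]) / beta_prod_t
    coeff_xt = alphas[t] ** 0.5 * (1.0 - alpha_prod_t_prev) / beta_prod_t
    return coeff_x0, coeff_xt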
| 660 | 0 |
"""simple docstring"""
from itertools import product
from cva import COLOR_BGR2GRAY, cvtColor, imread, imshow, waitKey
from numpy import dot, exp, mgrid, pi, ravel, square, uinta, zeros
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Any:
"""simple docstring"""
lowerCAmelCase__ :Dict = k_size // 2
lowerCAmelCase__ , lowerCAmelCase__ :Tuple = mgrid[0 - center : k_size - center, 0 - center : k_size - center]
lowerCAmelCase__ :List[Any] = 1 / (2 * pi * sigma) * exp(-(square(_SCREAMING_SNAKE_CASE ) + square(_SCREAMING_SNAKE_CASE )) / (2 * square(_SCREAMING_SNAKE_CASE )) )
return g
def __A (_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) ->Optional[int]:
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ :Optional[Any] = image.shape[0], image.shape[1]
# dst image height and width
lowerCAmelCase__ :Union[str, Any] = height - k_size + 1
lowerCAmelCase__ :Optional[Any] = width - k_size + 1
# im2col, turn the k_size*k_size pixels into a row and np.vstack all rows
lowerCAmelCase__ :Optional[int] = zeros((dst_height * dst_width, k_size * k_size) )
lowerCAmelCase__ :Tuple = 0
for i, j in product(range(_SCREAMING_SNAKE_CASE ) , range(_SCREAMING_SNAKE_CASE ) ):
lowerCAmelCase__ :Union[str, Any] = ravel(image[i : i + k_size, j : j + k_size] )
lowerCAmelCase__ :Optional[Any] = window
row += 1
# turn the kernel into shape(k*k, 1)
lowerCAmelCase__ :str = gen_gaussian_kernel(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ :str = ravel(_SCREAMING_SNAKE_CASE )
# reshape and get the dst image
lowerCAmelCase__ :Optional[Any] = dot(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).reshape(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ).astype(_SCREAMING_SNAKE_CASE )
return dst
if __name__ == "__main__":
# read original image
__A = imread(R"""../image_data/lena.jpg""")
# turn image in gray scale value
__A = cvtColor(img, COLOR_BGR2GRAY)
# get values with two different mask size
__A = gaussian_filter(gray, 3, sigma=1)
__A = gaussian_filter(gray, 5, sigma=0.8)
# show result images
imshow("""gaussian filter with 3x3 mask""", gaussianaxa)
imshow("""gaussian filter with 5x5 mask""", gaussianaxa)
waitKey()
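# Quick sanity check for gen_gaussian_kernel above: the kernel is symmetric and
# peaks at the centre. Note that, as written, the kernel is not normalised to
# sum to 1 (the 1 / (2 * pi * sigma) factor is only a scale), so dividing by
# the sum is a common extra step when exact brightness preservation matters.
def _demo_kernel_properties():
    k = gen_gaussian_kernel(3, sigma=1)
    assert k.shape == (3, 3)
    assert k[1][1] == k.max()  # centre is the peak
    normalised = k / k.sum()   # optional normalisation step
    return normalised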
| 93 |
def perfect_cube(n: int) -> bool:
    '''Return True if ``n`` is a perfect cube (e.g. 27), else False.'''
    # Round the floating-point cube root before cubing it again; comparing the
    # raw float directly fails for most cubes, since e.g. 27 ** (1 / 3) is
    # 3.0000000000000004 rather than exactly 3.
    val = round(n ** (1 / 3))
    return (val * val * val) == n


if __name__ == "__main__":
    print(perfect_cube(27))  # True
    print(perfect_cube(4))  # False
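# An alternative, float-free sketch of the same check using integer binary
# search; useful when n is too large for floating point to be exact.
def perfect_cube_binary_search(n: int) -> bool:
    if n < 0:
        n = -n  # a negative number is a cube iff its absolute value is
    lo, hi = 0, n
    while lo <= hi:
        mid = (lo + hi) // 2
        cube = mid * mid * mid
        if cube == n:
            return True
        if cube < n:
            lo = mid + 1
        else:
            hi = mid - 1
    return False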
| 344 | 0 |
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    """Find a root of ``function`` (a sympy-parsable expression string) by the
    Newton-Raphson method, starting from ``starting_point``."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(f'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}''')
# Find value of e
print(
"""The root of log(y) - 1 = 0 is """,
f'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
"""The root of exp(x) - 1 = 0 is""",
f'''{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}''',
)
# Find root of cos(x)
print(f'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
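    # Quadratic convergence in action: for sqrt(2) each iteration roughly
    # doubles the number of correct digits (illustrative trace that applies the
    # same update rule as newton_raphson above, inlined for x**2 - 2).
    guess = 1.0
    for _ in range(5):
        guess = guess - (guess**2 - 2) / (2 * guess)
        print(guess)  # 1.5, 1.41666..., 1.4142156..., 1.4142135623746899, ...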
| 447 |
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version("""pytorch_lightning>=1.0.4""")
MODEL_MODES = {
"""base""": AutoModel,
"""sequence-classification""": AutoModelForSequenceClassification,
"""question-answering""": AutoModelForQuestionAnswering,
"""pretraining""": AutoModelForPreTraining,
"""token-classification""": AutoModelForTokenClassification,
"""language-modeling""": AutoModelWithLMHead,
"""summarization""": AutoModelForSeqaSeqLM,
"""translation""": AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
"""linear""": get_linear_schedule_with_warmup,
"""cosine""": get_cosine_schedule_with_warmup,
"""cosine_w_restarts""": get_cosine_with_hard_restarts_schedule_with_warmup,
"""polynomial""": get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = """{""" + """, """.join(arg_to_scheduler_choices) + """}"""
class BaseTransformer(pl.LightningModule ):
def __init__( self : Dict , UpperCamelCase : argparse.Namespace , UpperCamelCase : str=None , UpperCamelCase : Tuple="base" , UpperCamelCase : Optional[Any]=None , UpperCamelCase : List[Any]=None , UpperCamelCase : Optional[int]=None , **UpperCamelCase : int , )->Dict:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(UpperCamelCase )
__SCREAMING_SNAKE_CASE : int = 0
__SCREAMING_SNAKE_CASE : List[Any] = Path(self.hparams.output_dir )
__SCREAMING_SNAKE_CASE : Dict = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__SCREAMING_SNAKE_CASE : List[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({"num_labels": num_labels} if num_labels is not None else {}) , cache_dir=UpperCamelCase , **UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : PretrainedConfig = config
__SCREAMING_SNAKE_CASE : Any = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
for p in extra_model_params:
if getattr(self.hparams , UpperCamelCase , UpperCamelCase ):
assert hasattr(self.config , UpperCamelCase ), F"""model config doesn't have a `{p}` attribute"""
setattr(self.config , UpperCamelCase , getattr(self.hparams , UpperCamelCase ) )
if tokenizer is None:
__SCREAMING_SNAKE_CASE : Optional[int] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : PreTrainedTokenizer = tokenizer
__SCREAMING_SNAKE_CASE : str = MODEL_MODES[mode]
if model is None:
__SCREAMING_SNAKE_CASE : Optional[Any] = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool(".ckpt" in self.hparams.model_name_or_path ) , config=self.config , cache_dir=UpperCamelCase , )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = model
def __snake_case ( self : Dict , *UpperCamelCase : Dict , **UpperCamelCase : Union[str, Any] )->Optional[Any]:
__SCREAMING_SNAKE_CASE : List[Any] = self.model_type.from_pretrained(*UpperCamelCase , **UpperCamelCase )
def __snake_case ( self : Optional[Any] )->Tuple:
__SCREAMING_SNAKE_CASE : Tuple = arg_to_scheduler[self.hparams.lr_scheduler]
__SCREAMING_SNAKE_CASE : List[str] = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__SCREAMING_SNAKE_CASE : Union[str, Any] = {"scheduler": scheduler, "interval": "step", "frequency": 1}
return scheduler
def __snake_case ( self : int )->Tuple:
__SCREAMING_SNAKE_CASE : List[Any] = self.model
__SCREAMING_SNAKE_CASE : Any = ["bias", "LayerNorm.weight"]
__SCREAMING_SNAKE_CASE : Optional[int] = [
{
"params": [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check this named paramters
"weight_decay": self.hparams.weight_decay,
},
{
"params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
"weight_decay": 0.0,
},
]
if self.hparams.adafactor:
__SCREAMING_SNAKE_CASE : Dict = Adafactor(
UpperCamelCase , lr=self.hparams.learning_rate , scale_parameter=UpperCamelCase , relative_step=UpperCamelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = AdamW(
UpperCamelCase , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__SCREAMING_SNAKE_CASE : List[Any] = optimizer
__SCREAMING_SNAKE_CASE : Any = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __snake_case ( self : Dict , UpperCamelCase : Any , UpperCamelCase : Any )->Optional[Any]:
return self.validation_step(UpperCamelCase , UpperCamelCase )
def __snake_case ( self : List[Any] , UpperCamelCase : Tuple )->List[Any]:
return self.validation_end(UpperCamelCase )
def __snake_case ( self : str )->int:
__SCREAMING_SNAKE_CASE : Union[str, Any] = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__SCREAMING_SNAKE_CASE : List[Any] = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Tuple )->Optional[Any]:
if stage == "test":
__SCREAMING_SNAKE_CASE : List[str] = len(self.test_dataloader().dataset )
else:
__SCREAMING_SNAKE_CASE : Optional[int] = self.get_dataloader("train" , self.hparams.train_batch_size , shuffle=UpperCamelCase )
__SCREAMING_SNAKE_CASE : Tuple = len(self.train_dataloader().dataset )
def __snake_case ( self : Optional[int] , UpperCamelCase : str , UpperCamelCase : int , UpperCamelCase : bool = False )->List[Any]:
raise NotImplementedError("You must implement this for your task" )
def __snake_case ( self : List[Any] )->Union[str, Any]:
return self.train_loader
def __snake_case ( self : Tuple )->Union[str, Any]:
return self.get_dataloader("dev" , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def __snake_case ( self : Optional[Any] )->Any:
return self.get_dataloader("test" , self.hparams.eval_batch_size , shuffle=UpperCamelCase )
def __snake_case ( self : int , UpperCamelCase : List[str] )->int:
return os.path.join(
self.hparams.data_dir , "cached_{}_{}_{}".format(
UpperCamelCase , list(filter(UpperCamelCase , self.hparams.model_name_or_path.split("/" ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __snake_case ( self : Union[str, Any] , UpperCamelCase : Dict[str, Any] )->None:
__SCREAMING_SNAKE_CASE : Union[str, Any] = self.output_dir.joinpath("best_tfmr" )
__SCREAMING_SNAKE_CASE : List[Any] = self.step_count
self.model.save_pretrained(UpperCamelCase )
self.tokenizer.save_pretrained(UpperCamelCase )
@staticmethod
def __snake_case ( UpperCamelCase : Any , UpperCamelCase : Optional[Any] )->List[str]:
parser.add_argument(
"--model_name_or_path" , default=UpperCamelCase , type=UpperCamelCase , required=UpperCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--config_name" , default="" , type=UpperCamelCase , help="Pretrained config name or path if not the same as model_name" )
parser.add_argument(
"--tokenizer_name" , default=UpperCamelCase , type=UpperCamelCase , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument(
"--cache_dir" , default=str(Path(UpperCamelCase ).parent / "test_run" / "cache" ) , type=UpperCamelCase , help="Where do you want to store the pre-trained models downloaded from huggingface.co" , )
parser.add_argument(
"--encoder_layerdrop" , type=UpperCamelCase , help="Encoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--decoder_layerdrop" , type=UpperCamelCase , help="Decoder layer dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--dropout" , type=UpperCamelCase , help="Dropout probability (Optional). Goes into model.config" , )
parser.add_argument(
"--attention_dropout" , type=UpperCamelCase , help="Attention dropout probability (Optional). Goes into model.config" , )
parser.add_argument("--learning_rate" , default=5E-5 , type=UpperCamelCase , help="The initial learning rate for Adam." )
parser.add_argument(
"--lr_scheduler" , default="linear" , choices=UpperCamelCase , metavar=UpperCamelCase , type=UpperCamelCase , help="Learning rate scheduler" , )
parser.add_argument("--weight_decay" , default=0.0 , type=UpperCamelCase , help="Weight decay if we apply some." )
parser.add_argument("--adam_epsilon" , default=1E-8 , type=UpperCamelCase , help="Epsilon for Adam optimizer." )
parser.add_argument("--warmup_steps" , default=0 , type=UpperCamelCase , help="Linear warmup over warmup_steps." )
parser.add_argument("--num_workers" , default=4 , type=UpperCamelCase , help="kwarg passed to DataLoader" )
parser.add_argument("--num_train_epochs" , dest="max_epochs" , default=3 , type=UpperCamelCase )
parser.add_argument("--train_batch_size" , default=3_2 , type=UpperCamelCase )
parser.add_argument("--eval_batch_size" , default=3_2 , type=UpperCamelCase )
parser.add_argument("--adafactor" , action="store_true" )
class InitCallback(pl.Callback ):
    def on_sanity_check_start( self , trainer , pl_module ):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on master worker with RAY. In new pytorch-lightning accelorators are removed.
            pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.


class CheckParamCallback(pl.Callback ):
    def on_after_backward( self , trainer , pl_module ):
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )


class LoggingCallback(pl.Callback ):
    def on_batch_end( self , trainer , pl_module ):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {F"""lr_group_{i}""": lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )

    def on_validation_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        rank_zero_info("***** Validation results *****" )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )

    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        rank_zero_info("***** Test results *****" )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , "test_results.txt" )
        with open(output_test_results_file , "w" ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key , str(metrics[key] ) ) )
                    writer.write("{} = {}\n".format(key , str(metrics[key] ) ) )
def add_generic_args(parser, root_dir) -> None:
    """Register the generic training CLI flags on ``parser``."""
    parser.add_argument(
        "--output_dir" , default=str(Path(root_dir ).parent / "test_run" / "model_checkpoints" ) , type=str , help="The output directory where the model predictions and checkpoints will be written." , )
    parser.add_argument(
        "--fp16" , action="store_true" , help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit" , )
    parser.add_argument(
        "--fp16_opt_level" , type=str , default="O2" , help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ) , )
    parser.add_argument("--n_tpu_cores" , dest="tpu_cores" , type=int )
    parser.add_argument("--max_grad_norm" , dest="gradient_clip_val" , default=1.0 , type=float , help="Max gradient norm" )
    parser.add_argument("--do_train" , action="store_true" , help="Whether to run training." )
    parser.add_argument("--do_predict" , action="store_true" , help="Whether to run predictions on the test set." )
    parser.add_argument(
        "--gradient_accumulation_steps" , dest="accumulate_grad_batches" , type=int , default=1 , help="Number of updates steps to accumulate before performing a backward/update pass." , )
    parser.add_argument("--seed" , type=int , default=42 , help="random seed for initialization" )
    parser.add_argument(
        "--data_dir" , default=str(Path(root_dir ).parent / "test_run" / "dummy-train-data" ) , type=str , help="The input data dir. Should contain the training files for the CoNLL-2003 NER task." , )


def generic_train(
    model,
    args: argparse.Namespace,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed )

    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )

    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix="checkpoint" , monitor="val_loss" , mode="min" , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()

    train_params = {}

    # TODO: remove with PyTorch 1.6 since pl uses native amp
    if args.fp16:
        train_params["precision"] = 16

    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"

    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"

    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )

    if args.do_train:
        trainer.fit(model )
    else:
        print("RAG modeling tests with new set functions successfully executed!" )

    return trainer
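# A hedged sketch of how the pieces above are typically wired together:
# subclass BaseTransformer for a concrete task, register the CLI flags, then
# hand everything to generic_train. MyTaskModel, its mode string, and the
# add_model_specific_args call are assumptions modelled on the upstream
# lightning_base.py examples, not definitions from this row.
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     args = parser.parse_args()
#
#     model = MyTaskModel(args, mode="sequence-classification")  # hypothetical subclass
#     trainer = generic_train(model, args)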
| 447 | 1 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
"examples": (re.compile(R'^check_min_version\(\"[^\"]+\"\)\s*$', re.MULTILINE), "check_min_version(\"VERSION\")\n"),
"init": (re.compile(R'^__version__\s+=\s+\"([^\"]+)\"\s*$', re.MULTILINE), "__version__ = \"VERSION\"\n"),
"setup": (re.compile(R'^(\s*)version\s*=\s*\"[^\"]+\",', re.MULTILINE), r"\1version=\"VERSION\","),
"doc": (re.compile(R'^(\s*)release\s*=\s*\"[^\"]+\"$', re.MULTILINE), "release = \"VERSION\"\n"),
}
REPLACE_FILES = {
"init": "src/diffusers/__init__.py",
"setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version in one file using a specific pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = '''🤗 Transformers currently provides the following architectures'''
    _end_prompt = '''1. Want to contribute a new model?'''
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc", "https://huggingface.co/docs/diffusers/model_doc", )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """Read the current version from the package __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS['''init'''][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can\'t create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
    else:
        default_version = F"""{default_version.major}.{default_version.minor + 1}.0"""

    # Now let's ask nicely if that's the right one.
    version = input(F"""Which version are you releasing? [{default_version}]""")
    if len(version) == 0:
        version = default_version
    print(F"""Updating version to {version}.""")
    global_version_update(version, patch=patch)


def post_release_work():
    """Do all the necessary post-release steps."""
    current_version = get_version()
    dev_version = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(F"""Which version are we developing now? [{dev_version}]""")
    if len(version) == 0:
        version = dev_version
    print(F"""Updating version to {version}.""")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--post_release', action='store_true', help='Whether this is pre or post release.')
parser.add_argument('--patch', action='store_true', help='Whether or not this is a patch release.')
    args = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('Nothing to do after a patch :-)')
else:
post_release_work()
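# A self-contained check of the "init" pattern above: it rewrites a
# __version__ line the same way update_version_in_file does.
def _demo_version_bump():
    sample = '__version__ = "0.9.0.dev0"\n'
    re_pattern, replace = REPLACE_PATTERNS["init"]
    # returns the line rewritten to version 0.9.0
    return re_pattern.sub(replace.replace("VERSION", "0.9.0"), sample)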
| 159 |
from ..utils import DummyObject, requires_backends


class ASTFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])


class Speech2TextFeatureExtractor(metaclass=DummyObject):
    _backends = ["speech"]

    def __init__(self, *args, **kwargs):
        requires_backends(self, ["speech"])
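# What the dummy classes above buy you: importing the module always succeeds,
# but *instantiating* a dummy raises a helpful ImportError naming the missing
# backend. Kept commented, since triggering it requires an environment without
# the speech extras installed.
#
#     try:
#         ASTFeatureExtractor()
#     except ImportError as err:
#         print(err)  # explains that the "speech" backend is required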
| 542 | 0 |
def abbr(a: str, b: str) -> bool:
    '''
    Return True if string ``a`` can be turned into ``b`` by capitalising some of
    its lowercase letters and deleting all remaining lowercase letters.

    >>> abbr("daBcd", "ABC")
    True
    >>> abbr("dBcd", "ABC")
    False
    '''
    n = len(a)
    m = len(b)
    dp = [[False for _ in range(m + 1)] for _ in range(n + 1)]
    dp[0][0] = True
    for i in range(n):
        for j in range(m + 1):
            if dp[i][j]:
                if j < m and a[i].upper() == b[j]:
                    dp[i + 1][j + 1] = True
                if a[i].islower():
                    dp[i + 1][j] = True
    return dp[n][m]


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 714 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 676 | 0 |
import numpy as np
from cv2 import destroyAllWindows, imread, imshow, waitKey


class NearestNeighbour:
    def __init__(self, img, dst_width: int, dst_height: int):
        if dst_width < 0 or dst_height < 0:
            raise ValueError('''Destination width/height should be > 0''' )

        self.img = img
        self.src_w = img.shape[1]
        self.src_h = img.shape[0]
        self.dst_w = dst_width
        self.dst_h = dst_height

        self.ratio_x = self.src_w / self.dst_w
        self.ratio_y = self.src_h / self.dst_h

        self.output = self.output_img = (
            np.ones((self.dst_h, self.dst_w, 3) , np.uint8 ) * 255
        )

    def process(self):
        for i in range(self.dst_h ):
            for j in range(self.dst_w ):
                self.output[i][j] = self.img[self.get_y(i )][self.get_x(j )]

    def get_x(self, x: int) -> int:
        return int(self.ratio_x * x )

    def get_y(self, y: int) -> int:
        return int(self.ratio_y * y )


if __name__ == "__main__":
    dst_w, dst_h = 800, 600
    im = imread("""image_data/lena.jpg""", 1)
    n = NearestNeighbour(im, dst_w, dst_h)
    n.process()

    imshow(
        f'Image resized from: {im.shape[1]}x{im.shape[0]} to {dst_w}x{dst_h}', n.output
    )
    waitKey(0)
    destroyAllWindows()
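# A quick, dependency-light check of the index mapping NearestNeighbour uses:
# upscaling a 2x2 image to 4x4 repeats each source pixel in a 2x2 block.
def _demo_nearest_neighbour():
    src = np.array([[[0, 0, 0], [255, 255, 255]],
                    [[255, 0, 0], [0, 0, 255]]], dtype=np.uint8)
    resizer = NearestNeighbour(src, 4, 4)
    resizer.process()
    assert (resizer.output[0][0] == src[0][0]).all()
    assert (resizer.output[3][3] == src[1][1]).all()
    return resizer.output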
| 203 |
from __future__ import annotations
from math import gcd
def pollard_rho(num: int, seed: int = 2, step: int = 1, attempts: int = 3) -> int | None:
'''simple docstring'''
if num < 2:
raise ValueError('The input value cannot be less than 2' )
# Because of the relationship between ``f(f(x))`` and ``f(x)``, this
# algorithm struggles to find factors that are divisible by two.
# As a workaround, we specifically check for two and even inputs.
# See: https://math.stackexchange.com/a/2856214/165820
if num > 2 and num % 2 == 0:
return 2
# Pollard's Rho algorithm requires a function that returns pseudorandom
# values between 0 <= X < ``num``. It doesn't need to be random in the
# sense that the output value is cryptographically secure or difficult
# to calculate, it only needs to be random in the sense that all output
# values should be equally likely to appear.
# For this reason, Pollard suggested using ``f(x) = (x**2 - 1) % num``
# However, the success of Pollard's algorithm isn't guaranteed and is
# determined in part by the initial seed and the chosen random function.
# To make retries easier, we will instead use ``f(x) = (x**2 + C) % num``
# where ``C`` is a value that we can modify between each attempt.
    def rand_fn(value: int, step: int, modulus: int) -> int:
        return (pow(value, 2) + step) % modulus
    for _ in range(attempts):
# These track the position within the cycle detection logic.
        tortoise = seed
        hare = seed

        while True:
            # At each iteration, the tortoise moves one step and the hare moves two.
            tortoise = rand_fn(tortoise, step, num)
            hare = rand_fn(hare, step, num)
            hare = rand_fn(hare, step, num)
# At some point both the tortoise and the hare will enter a cycle whose
# length ``p`` is a divisor of ``num``. Once in that cycle, at some point
# the tortoise and hare will end up on the same value modulo ``p``.
# We can detect when this happens because the position difference between
# the tortoise and the hare will share a common divisor with ``num``.
            divisor = gcd(hare - tortoise, num)
if divisor == 1:
# No common divisor yet, just keep searching.
continue
else:
# We found a common divisor!
if divisor == num:
# Unfortunately, the divisor is ``num`` itself and is useless.
break
else:
# The divisor is a nontrivial factor of ``num``!
return divisor
# If we made it here, then this attempt failed.
# We need to pick a new starting seed for the tortoise and hare
# in addition to a new step value for the random function.
# To keep this example implementation deterministic, the
# new values will be generated based on currently available
# values instead of using something like ``random.randint``.
# We can use the hare's position as the new seed.
# This is actually what Richard Brent's the "optimized" variant does.
        seed = hare
# The new step value for the random function can just be incremented.
# At first the results will be similar to what the old function would
# have produced, but the value will quickly diverge after a bit.
step += 1
# We haven't found a divisor within the requested number of attempts.
# We were unlucky or ``num`` itself is actually prime.
return None
if __name__ == "__main__":
import argparse
    parser = argparse.ArgumentParser()
parser.add_argument(
"num",
type=int,
help="The value to find a divisor of",
)
parser.add_argument(
"--attempts",
type=int,
default=3,
help="The number of attempts before giving up",
)
    args = parser.parse_args()

    divisor = pollard_rho(args.num, attempts=args.attempts)
if divisor is None:
print(F"""{args.num} is probably prime""")
else:
        quotient = args.num // divisor
print(F"""{args.num} = {divisor} * {quotient}""")
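# A direct library-style use of pollard_rho (the CLI above wraps the same
# call): factor a small semiprime and check the result divides the input.
def _demo_pollard_rho():
    num = 8051  # = 83 * 97, the classic worked example for Pollard's rho
    divisor = pollard_rho(num)
    assert divisor is not None and num % divisor == 0
    return divisor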
| 439 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""w2v_encoder.proj""": """lm_head""",
"""mask_emb""": """masked_spec_embed""",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "hubert." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key
                if key in name or (key.split("w2v_model.")[-1] == name.split(".")[0] and not is_finetuned):
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_hubert_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = HubertConfig.from_pretrained(config_path)
    else:
        config = HubertConfig()

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1,
                sampling_rate=16_000,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_wav2vec = HubertForCTC(config)
    else:
        hf_wav2vec = HubertModel(config)

    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec, is_finetuned)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to fairseq checkpoint')
parser.add_argument('--dict_path', default=None, type=str, help='Path to dict of fine-tuned model')
parser.add_argument('--config_path', default=None, type=str, help='Path to hf config.json of model to convert')
parser.add_argument(
'--not_finetuned', action='store_true', help='Whether the model to convert is a fine-tuned model or not'
)
    args = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
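# A hypothetical direct invocation of the converter above (all paths are placeholders,
# shown only to illustrate the expected call signature for a pretrained-only checkpoint):
#
#     convert_hubert_checkpoint(
#         checkpoint_path="hubert_base_ls960.pt",
#         pytorch_dump_folder_path="./hubert-base",
#         is_finetuned=False,  # pretrained-only checkpoint -> HubertModel
#     )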
| 719 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
    Text2TextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class Text2TextGenerationPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        generator = Text2TextGenerationPipeline(model=model, tokenizer=tokenizer)
        return generator, ["Something to write", "Something else"]

    def run_pipeline_test(self, generator, _):
        outputs = generator("Something there")
        self.assertEqual(outputs, [{"generated_text": ANY(str)}])
        # These are encoder decoder, they don't just append to incoming string
        self.assertFalse(outputs[0]["generated_text"].startswith("Something there"))

        outputs = generator(["This is great !", "Something else"], num_return_sequences=2, do_sample=True)
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        outputs = generator(
            ["This is great !", "Something else"], num_return_sequences=2, batch_size=2, do_sample=True
        )
        self.assertEqual(
            outputs,
            [
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
                [{"generated_text": ANY(str)}, {"generated_text": ANY(str)}],
            ],
        )

        with self.assertRaises(ValueError):
            generator(4)
    @require_torch
    def test_small_model_pt(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="pt")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])

        num_return_sequences = 3
        outputs = generator(
            "Something there",
            num_return_sequences=num_return_sequences,
            num_beams=num_return_sequences,
        )
        target_outputs = [
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
            {"generated_text": ""},
        ]
        self.assertEqual(outputs, target_outputs)

        outputs = generator("This is a test", do_sample=True, num_return_sequences=2, return_tensors=True)
        self.assertEqual(
            outputs,
            [
                {"generated_token_ids": ANY(torch.Tensor)},
                {"generated_token_ids": ANY(torch.Tensor)},
            ],
        )

        generator.tokenizer.pad_token_id = generator.model.config.eos_token_id
        generator.tokenizer.pad_token = "<pad>"
        outputs = generator(
            ["This is a test", "This is a second test"],
            do_sample=True,
            num_return_sequences=2,
            batch_size=2,
            return_tensors=True,
        )
        self.assertEqual(
            outputs,
            [
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
                [
                    {"generated_token_ids": ANY(torch.Tensor)},
                    {"generated_token_ids": ANY(torch.Tensor)},
                ],
            ],
        )
    @require_tf
    def test_small_model_tf(self):
        generator = pipeline("text2text-generation", model="patrickvonplaten/t5-tiny-random", framework="tf")
        # do_sample=False necessary for reproducibility
        outputs = generator("Something there", do_sample=False)
        self.assertEqual(outputs, [{"generated_text": ""}])
| 348 | 0 |
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "bigscience/tokenizer": "https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json",
        "bigscience/bloom-560m": "https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json",
        "bigscience/bloom-1b1": "https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json",
        "bigscience/bloom-1b7": "https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json",
        "bigscience/bloom-3b": "https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json",
        "bigscience/bloom-7b1": "https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json",
        "bigscience/bloom": "https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json",
    },
}
class BloomTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = None

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<unk>",
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        add_prefix_space=False,
        clean_up_tokenization_spaces=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            add_prefix_space=add_prefix_space,
            clean_up_tokenization_spaces=clean_up_tokenization_spaces,
            **kwargs,
        )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)
        self.add_prefix_space = add_prefix_space

    def _batch_encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._batch_encode_plus(*args, **kwargs)

    def _encode_plus(self, *args, **kwargs) -> BatchEncoding:
        is_split_into_words = kwargs.get("is_split_into_words", False)
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f"You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"
                " pretokenized inputs."
            )
        return super()._encode_plus(*args, **kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])

        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
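# Brief usage sketch for the fast tokenizer above (the checkpoint name is taken from
# PRETRAINED_VOCAB_FILES_MAP; this simply round-trips a short string):
#
#     from transformers import BloomTokenizerFast
#
#     tok = BloomTokenizerFast.from_pretrained("bigscience/bloom-560m")
#     ids = tok("Hello world")["input_ids"]
#     print(ids, tok.decode(ids))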
| 4 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
from filelock import FileLock
from transformers import PreTrainedTokenizer, is_tf_available, is_torch_available
logger = logging.getLogger(__name__)
@dataclass
class InputExample:
    """A single training/test example for token classification."""

    guid: str
    words: List[str]
    labels: Optional[List[str]]


@dataclass
class InputFeatures:
    """A single set of features of data; property names mirror the model inputs."""

    input_ids: List[int]
    attention_mask: List[int]
    token_type_ids: Optional[List[int]] = None
    label_ids: Optional[List[int]] = None


class Split(Enum):
    train = "train"
    dev = "dev"
    test = "test"


class TokenClassificationTask:
    @staticmethod
    def read_examples_from_file(data_dir, mode: Union[Split, str]) -> List[InputExample]:
        raise NotImplementedError

    @staticmethod
    def get_labels(path: str) -> List[str]:
        raise NotImplementedError
@staticmethod
    def convert_examples_to_features(
        examples: List[InputExample],
        label_list: List[str],
        max_seq_length: int,
        tokenizer: PreTrainedTokenizer,
        cls_token_at_end=False,
        cls_token="[CLS]",
        cls_token_segment_id=1,
        sep_token="[SEP]",
        sep_token_extra=False,
        pad_on_left=False,
        pad_token=0,
        pad_token_segment_id=0,
        pad_token_label_id=-100,
        sequence_a_segment_id=0,
        mask_padding_with_zero=True,
    ) -> List[InputFeatures]:
        label_map = {label: i for i, label in enumerate(label_list)}
        features = []
        for ex_index, example in enumerate(examples):
            if ex_index % 10_000 == 0:
                logger.info("Writing example %d of %d", ex_index, len(examples))
            tokens = []
            label_ids = []
            for word, label in zip(example.words, example.labels):
                word_tokens = tokenizer.tokenize(word)
                # bert-base-multilingual-cased sometimes outputs "nothing" ([]) when tokenizing just a space.
                if len(word_tokens) > 0:
                    tokens.extend(word_tokens)
                    # Use the real label id for the first token of the word, and padding ids for the remaining tokens
                    label_ids.extend([label_map[label]] + [pad_token_label_id] * (len(word_tokens) - 1))
            # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
            special_tokens_count = tokenizer.num_special_tokens_to_add()
            if len(tokens) > max_seq_length - special_tokens_count:
                tokens = tokens[: (max_seq_length - special_tokens_count)]
                label_ids = label_ids[: (max_seq_length - special_tokens_count)]
# The convention in BERT is:
# (a) For sequence pairs:
# tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]
# type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1
# (b) For single sequences:
# tokens: [CLS] the dog is hairy . [SEP]
# type_ids: 0 0 0 0 0 0 0
#
# Where "type_ids" are used to indicate whether this is the first
# sequence or the second sequence. The embedding vectors for `type=0` and
# `type=1` were learned during pre-training and are added to the wordpiece
# embedding vector (and position vector). This is not *strictly* necessary
# since the [SEP] token unambiguously separates the sequences, but it makes
# it easier for the model to learn the concept of sequences.
#
# For classification tasks, the first vector (corresponding to [CLS]) is
# used as the "sentence vector". Note that this only makes sense because
# the entire model is fine-tuned.
            tokens += [sep_token]
            label_ids += [pad_token_label_id]
            if sep_token_extra:
                # roberta uses an extra separator b/w pairs of sentences
                tokens += [sep_token]
                label_ids += [pad_token_label_id]
            segment_ids = [sequence_a_segment_id] * len(tokens)

            if cls_token_at_end:
                tokens += [cls_token]
                label_ids += [pad_token_label_id]
                segment_ids += [cls_token_segment_id]
            else:
                tokens = [cls_token] + tokens
                label_ids = [pad_token_label_id] + label_ids
                segment_ids = [cls_token_segment_id] + segment_ids

            input_ids = tokenizer.convert_tokens_to_ids(tokens)

            # The mask has 1 for real tokens and 0 for padding tokens. Only real
            # tokens are attended to.
            input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)

            # Zero-pad up to the sequence length.
            padding_length = max_seq_length - len(input_ids)
            if pad_on_left:
                input_ids = ([pad_token] * padding_length) + input_ids
                input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
                segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
                label_ids = ([pad_token_label_id] * padding_length) + label_ids
            else:
                input_ids += [pad_token] * padding_length
                input_mask += [0 if mask_padding_with_zero else 1] * padding_length
                segment_ids += [pad_token_segment_id] * padding_length
                label_ids += [pad_token_label_id] * padding_length

            assert len(input_ids) == max_seq_length
            assert len(input_mask) == max_seq_length
            assert len(segment_ids) == max_seq_length
            assert len(label_ids) == max_seq_length

            if ex_index < 5:
                logger.info("*** Example ***")
                logger.info("guid: %s", example.guid)
                logger.info("tokens: %s", " ".join([str(x) for x in tokens]))
                logger.info("input_ids: %s", " ".join([str(x) for x in input_ids]))
                logger.info("input_mask: %s", " ".join([str(x) for x in input_mask]))
                logger.info("segment_ids: %s", " ".join([str(x) for x in segment_ids]))
                logger.info("label_ids: %s", " ".join([str(x) for x in label_ids]))

            if "token_type_ids" not in tokenizer.model_input_names:
                segment_ids = None

            features.append(
                InputFeatures(
                    input_ids=input_ids, attention_mask=input_mask, token_type_ids=segment_ids, label_ids=label_ids
                )
            )
        return features
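# To make the subword alignment above concrete: only a word's first subtoken keeps the
# real label; continuation subtokens get pad_token_label_id (-100, the CrossEntropyLoss
# ignore_index) so they do not contribute to the loss. With hypothetical subtokens:
#
#     words     = ["Washington", "D.C."]          labels = ["B-LOC", "I-LOC"]
#     subtokens = ["Wash", "##ington", "D", "##.", "##C", "##."]
#     label_ids = [  1   ,   -100   ,  2 , -100 , -100 , -100 ]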
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
    class TokenClassificationDataset(Dataset):
        features: List[InputFeatures]
        pad_token_label_id: int = nn.CrossEntropyLoss().ignore_index
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.
        def __init__(
            self,
            token_classification_task: TokenClassificationTask,
            data_dir: str,
            tokenizer: PreTrainedTokenizer,
            labels: List[str],
            model_type: str,
            max_seq_length: Optional[int] = None,
            overwrite_cache=False,
            mode: Split = Split.train,
        ):
            # Load data features from cache or dataset file
            cached_features_file = os.path.join(
                data_dir, "cached_{}_{}_{}".format(mode.value, tokenizer.__class__.__name__, str(max_seq_length))
            )
            # Make sure only the first process in distributed training processes the dataset,
            # and the others will use the cache.
            lock_path = cached_features_file + ".lock"
            with FileLock(lock_path):
                if os.path.exists(cached_features_file) and not overwrite_cache:
                    logger.info(f"Loading features from cached file {cached_features_file}")
                    self.features = torch.load(cached_features_file)
                else:
                    logger.info(f"Creating features from dataset file at {data_dir}")
                    examples = token_classification_task.read_examples_from_file(data_dir, mode)
                    # TODO clean up all this to leverage built-in features of tokenizers
                    self.features = token_classification_task.convert_examples_to_features(
                        examples, labels, max_seq_length, tokenizer,
                        cls_token_at_end=bool(model_type in ["xlnet"]), cls_token=tokenizer.cls_token,
                        cls_token_segment_id=2 if model_type in ["xlnet"] else 0, sep_token=tokenizer.sep_token,
                        sep_token_extra=False, pad_on_left=bool(tokenizer.padding_side == "left"),
                        pad_token=tokenizer.pad_token_id, pad_token_segment_id=tokenizer.pad_token_type_id,
                        pad_token_label_id=self.pad_token_label_id,
                    )
                    logger.info(f"Saving features into cached file {cached_features_file}")
                    torch.save(self.features, cached_features_file)
        def __len__(self):
            return len(self.features)

        def __getitem__(self, i) -> InputFeatures:
            return self.features[i]
if is_tf_available():
import tensorflow as tf
    class TFTokenClassificationDataset:
        features: List[InputFeatures]
        pad_token_label_id: int = -100
        # Use the cross entropy ignore_index as padding label id so that only
        # real label ids contribute to the loss later.

        def __init__(
            self, token_classification_task: TokenClassificationTask, data_dir: str,
            tokenizer: PreTrainedTokenizer, labels: List[str], model_type: str,
            max_seq_length: Optional[int] = None, overwrite_cache=False, mode: Split = Split.train,
        ):
            examples = token_classification_task.read_examples_from_file(data_dir, mode)
            # TODO clean up all this to leverage built-in features of tokenizers
            self.features = token_classification_task.convert_examples_to_features(
                examples, labels, max_seq_length, tokenizer, cls_token_at_end=bool(model_type in ["xlnet"]),
                cls_token=tokenizer.cls_token, cls_token_segment_id=2 if model_type in ["xlnet"] else 0,
                sep_token=tokenizer.sep_token, sep_token_extra=False,
                pad_on_left=bool(tokenizer.padding_side == "left"), pad_token=tokenizer.pad_token_id,
                pad_token_segment_id=tokenizer.pad_token_type_id, pad_token_label_id=self.pad_token_label_id,
            )
def gen():
for ex in self.features:
if ex.token_type_ids is None:
yield (
{"input_ids": ex.input_ids, "attention_mask": ex.attention_mask},
ex.label_ids,
)
else:
yield (
{
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label_ids,
)
if "token_type_ids" not in tokenizer.model_input_names:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32}, tf.int64),
                    (
                        {"input_ids": tf.TensorShape([None]), "attention_mask": tf.TensorShape([None])},
                        tf.TensorShape([None]),
                    ),
                )
else:
                self.dataset = tf.data.Dataset.from_generator(
                    gen,
                    ({"input_ids": tf.int32, "attention_mask": tf.int32, "token_type_ids": tf.int32}, tf.int64),
                    (
                        {
                            "input_ids": tf.TensorShape([None]),
                            "attention_mask": tf.TensorShape([None]),
                            "token_type_ids": tf.TensorShape([None]),
                        },
                        tf.TensorShape([None]),
                    ),
                )
        def get_dataset(self):
            self.dataset = self.dataset.apply(tf.data.experimental.assert_cardinality(len(self.features)))
return self.dataset
def __len__( self : Optional[Any] ):
return len(self.features )
        def __getitem__(self, i) -> InputFeatures:
return self.features[i]
| 374 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
lowerCAmelCase__ = version.parse(importlib_metadata.version("nltk"))
if NLTK_VERSION >= version.Version("3.6.4"):
from nltk import word_tokenize
lowerCAmelCase__ = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
lowerCAmelCase__ = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
lowerCAmelCase__ = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"],
            reference_urls=[
                "https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
                "https://en.wikipedia.org/wiki/METEOR",
            ],
        )

    def _download_and_prepare(self, dl_manager):
        import nltk

        nltk.download("wordnet")
        if NLTK_VERSION >= version.Version("3.6.5"):
            nltk.download("punkt")
        if NLTK_VERSION >= version.Version("3.6.6"):
            nltk.download("omw-1.4")

    def _compute(self, predictions, references, alpha=0.9, beta=3, gamma=0.5):
        if NLTK_VERSION >= version.Version("3.6.5"):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref), word_tokenize(pred), alpha=alpha, beta=beta, gamma=gamma
                )
                for ref, pred in zip(references, predictions)
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref, pred, alpha=alpha, beta=beta, gamma=gamma)
                for ref, pred in zip(references, predictions)
            ]

        return {"meteor": np.mean(scores)}
| 711 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def extract_label(fname):
    stem = fname.split(os.path.sep)[-1]
    return re.search(r"^(.*)_\d+\.jpg$", stem).groups()[0]
class PetsDataset(Dataset):
    def __init__(self, file_names, image_transform=None, label_to_id=None):
        self.file_names = file_names
        self.image_transform = image_transform
        self.label_to_id = label_to_id

    def __len__(self):
        return len(self.file_names)

    def __getitem__(self, idx):
        fname = self.file_names[idx]
        raw_image = PIL.Image.open(fname)
        image = raw_image.convert("RGB")
        if self.image_transform is not None:
            image = self.image_transform(image)
        label = extract_label(fname)
        if self.label_to_id is not None:
            label = self.label_to_id[label]
        return {"image": image, "label": label}
def training_function(config, args):
# Initialize accelerator
if args.with_tracking:
UpperCAmelCase = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with="all" , project_dir=args.project_dir )
else:
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["lr"]
UpperCAmelCase = int(config["num_epochs"] )
UpperCAmelCase = int(config["seed"] )
UpperCAmelCase = int(config["batch_size"] )
UpperCAmelCase = config["image_size"]
if not isinstance(__A , (list, tuple) ):
UpperCAmelCase = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , "isdigit" ):
if args.checkpointing_steps == "epoch":
UpperCAmelCase = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
UpperCAmelCase = int(args.checkpointing_steps )
else:
raise ValueError(
F"Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed." )
else:
UpperCAmelCase = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
UpperCAmelCase = os.path.split(__A )[-1].split("." )[0]
accelerator.init_trackers(__A , __A )
# Grab all the image filenames
UpperCAmelCase = [os.path.join(args.data_dir , __A ) for fname in os.listdir(args.data_dir ) if fname.endswith(".jpg" )]
# Build the label correspondences
UpperCAmelCase = [extract_label(__A ) for fname in file_names]
UpperCAmelCase = list(set(__A ) )
id_to_label.sort()
UpperCAmelCase = {lbl: i for i, lbl in enumerate(__A )}
# Set the seed before splitting the data.
np.random.seed(__A )
torch.manual_seed(__A )
torch.cuda.manual_seed_all(__A )
# Split our filenames between train and validation
UpperCAmelCase = np.random.permutation(len(__A ) )
UpperCAmelCase = int(0.8 * len(__A ) )
UpperCAmelCase = random_perm[:cut]
UpperCAmelCase = random_perm[cut:]
# For training we use a simple RandomResizedCrop
UpperCAmelCase = Compose([RandomResizedCrop(__A , scale=(0.5, 1.0) ), ToTensor()] )
UpperCAmelCase = PetsDataset(
[file_names[i] for i in train_split] , image_transform=__A , label_to_id=__A )
# For evaluation, we use a deterministic Resize
UpperCAmelCase = Compose([Resize(__A ), ToTensor()] )
UpperCAmelCase = PetsDataset([file_names[i] for i in eval_split] , image_transform=__A , label_to_id=__A )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
UpperCAmelCase = DataLoader(__A , shuffle=__A , batch_size=__A , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = create_model("resnet50d" , pretrained=__A , num_classes=len(__A ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
UpperCAmelCase = False
for param in model.get_classifier().parameters():
UpperCAmelCase = True
# We normalize the batches of images to be a bit faster.
UpperCAmelCase = torch.tensor(model.default_cfg["mean"] )[None, :, None, None].to(accelerator.device )
UpperCAmelCase = torch.tensor(model.default_cfg["std"] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
UpperCAmelCase = OneCycleLR(optimizer=__A , max_lr=__A , epochs=__A , steps_per_epoch=len(__A ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )
# We need to keep track of how many total steps we have iterated over
UpperCAmelCase = 0
# We also need to keep track of the starting epoch so files are named properly
UpperCAmelCase = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F"Resumed from checkpoint: {args.resume_from_checkpoint}" )
accelerator.load_state(args.resume_from_checkpoint )
UpperCAmelCase = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
UpperCAmelCase = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
UpperCAmelCase = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
UpperCAmelCase = os.path.splitext(__A )[0]
if "epoch" in training_difference:
UpperCAmelCase = int(training_difference.replace("epoch_" , "" ) ) + 1
UpperCAmelCase = None
else:
UpperCAmelCase = int(training_difference.replace("step_" , "" ) )
UpperCAmelCase = resume_step // len(__A )
resume_step -= starting_epoch * len(__A )
# Now we train the model
for epoch in range(__A , __A ):
model.train()
if args.with_tracking:
UpperCAmelCase = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
UpperCAmelCase = accelerator.skip_first_batches(__A , __A )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
UpperCAmelCase = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
UpperCAmelCase = model(__A )
UpperCAmelCase = torch.nn.functional.cross_entropy(__A , batch["label"] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(__A )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(__A , __A ):
UpperCAmelCase = F"step_{overall_step}"
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
model.eval()
UpperCAmelCase = 0
UpperCAmelCase = 0
for step, batch in enumerate(__A ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
UpperCAmelCase = {k: v.to(accelerator.device ) for k, v in batch.items()}
UpperCAmelCase = (batch["image"] - mean) / std
with torch.no_grad():
UpperCAmelCase = model(__A )
UpperCAmelCase = outputs.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["label"]) )
UpperCAmelCase = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
UpperCAmelCase = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F"epoch {epoch}: {100 * eval_metric:.2f}" )
if args.with_tracking:
accelerator.log(
{
"accuracy": 100 * eval_metric,
"train_loss": total_loss.item() / len(__A ),
"epoch": epoch,
} , step=__A , )
if checkpointing_steps == "epoch":
UpperCAmelCase = F"epoch_{epoch}"
if args.output_dir is not None:
UpperCAmelCase = os.path.join(args.output_dir , __A )
accelerator.save_state(__A )
if args.with_tracking:
accelerator.end_training()
def main():
    parser = argparse.ArgumentParser(description="Simple example of training script.")
parser.add_argument("--data_dir" , required=__A , help="The data folder on disk." )
parser.add_argument("--fp16" , action="store_true" , help="If passed, will use FP16 training." )
parser.add_argument(
"--mixed_precision" , type=__A , default=__A , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
parser.add_argument(
"--checkpointing_steps" , type=__A , default=__A , help="Whether the various states should be saved at the end of every n steps, or 'epoch' for each epoch." , )
parser.add_argument(
"--output_dir" , type=__A , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--resume_from_checkpoint" , type=__A , default=__A , help="If the training should continue from a checkpoint folder." , )
parser.add_argument(
"--with_tracking" , action="store_true" , help="Whether to load in all available experiment trackers from the environment and use them for logging." , )
parser.add_argument(
"--project_dir" , type=__A , default="logs" , help="Location on where to store experiment tracking logs` and relevent project information" , )
    args = parser.parse_args()
    config = {"lr": 3e-2, "num_epochs": 3, "seed": 42, "batch_size": 64, "image_size": 224}
    training_function(config, args)
if __name__ == "__main__":
main()
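# Hypothetical programmatic smoke test mirroring main() (the data path is a placeholder;
# the real script is normally launched via `accelerate launch`):
#
#     args = argparse.Namespace(
#         data_dir="./images", mixed_precision=None, cpu=True, checkpointing_steps=None,
#         output_dir=".", resume_from_checkpoint=None, with_tracking=False, project_dir="logs",
#     )
#     config = {"lr": 3e-2, "num_epochs": 1, "seed": 42, "batch_size": 8, "image_size": 224}
#     training_function(config, args)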
| 1 | 0 |
class PrefixSum:
    def __init__(self, array):
        len_array = len(array)
        self.prefix_sum = [0] * len_array

        if len_array > 0:
            self.prefix_sum[0] = array[0]

        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start, end):
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]

    def contains_sum(self, target_sum):
        """Whether some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
if __name__ == "__main__":
import doctest
doctest.testmod()
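# Example usage: range-sum queries in O(1) after O(n) preprocessing.
#
#     ps = PrefixSum([1, 2, 3, 4, 5])
#     ps.get_sum(0, 4)       # 15
#     ps.get_sum(2, 4)       # 12
#     ps.contains_sum(9)     # True (2 + 3 + 4)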
| 321 |
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module):
"""simple docstring"""
    def __init__(
        self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1, activation: str = "relu"
    ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels, out_channels, kernel_size=kernel_size, stride=stride, padding=kernel_size // 2, bias=False
        )
        self.normalization = nn.BatchNorm2d(out_channels)
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetEmbeddings(nn.Module):
    """
    ResNet Embeddings (stem) composed of a single aggressive convolution.
    """

    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels, config.embedding_size, kernel_size=7, stride=2, activation=config.hidden_act
        )
        self.pooler = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.num_channels = config.num_channels

    def forward(self, pixel_values: Tensor) -> Tensor:
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration."
            )
        embedding = self.embedder(pixel_values)
        embedding = self.pooler(embedding)
        return embedding
class ResNetShortCut(nn.Module):
    """
    ResNet shortcut, used to project the residual features to the correct size. If needed, it is also used to
    downsample the input using `stride=2`.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 2):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False)
        self.normalization = nn.BatchNorm2d(out_channels)

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = self.convolution(input)
        hidden_state = self.normalization(hidden_state)
        return hidden_state
class ResNetBasicLayer(nn.Module):
    """
    A classic ResNet's residual layer composed by two `3x3` convolutions.
    """

    def __init__(self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu"):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, out_channels, stride=stride),
            ResNetConvLayer(out_channels, out_channels, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetBottleNeckLayer(nn.Module):
    """
    A classic ResNet's bottleneck layer. The first `1x1` convolution reduces the input by a factor of `reduction`
    to make the middle `3x3` convolution faster; the last `1x1` convolution remaps it to `out_channels`.
    """

    def __init__(
        self, in_channels: int, out_channels: int, stride: int = 1, activation: str = "relu", reduction: int = 4
    ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels, out_channels, stride=stride) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels, reduces_channels, kernel_size=1),
            ResNetConvLayer(reduces_channels, reduces_channels, stride=stride),
            ResNetConvLayer(reduces_channels, out_channels, kernel_size=1, activation=None),
        )
        self.activation = ACT2FN[activation]

    def forward(self, hidden_state):
        residual = hidden_state
        hidden_state = self.layer(hidden_state)
        residual = self.shortcut(residual)
        hidden_state += residual
        hidden_state = self.activation(hidden_state)
        return hidden_state
class ResNetStage(nn.Module):
    """
    A ResNet stage composed by stacked layers.
    """

    def __init__(self, config: ResNetConfig, in_channels: int, out_channels: int, stride: int = 2, depth: int = 2):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels, out_channels, stride=stride, activation=config.hidden_act),
            *[layer(out_channels, out_channels, activation=config.hidden_act) for _ in range(depth - 1)],
        )

    def forward(self, input: Tensor) -> Tensor:
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state)
        return hidden_state
class ResNetEncoder(nn.Module):
    def __init__(self, config: ResNetConfig):
        super().__init__()
        self.stages = nn.ModuleList([])
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config,
                config.embedding_size,
                config.hidden_sizes[0],
                stride=2 if config.downsample_in_first_stage else 1,
                depth=config.depths[0],
            )
        )
        in_out_channels = zip(config.hidden_sizes, config.hidden_sizes[1:])
        for (in_channels, out_channels), depth in zip(in_out_channels, config.depths[1:]):
            self.stages.append(ResNetStage(config, in_channels, out_channels, depth=depth))

    def forward(
        self, hidden_state: Tensor, output_hidden_states: bool = False, return_dict: bool = True
    ) -> BaseModelOutputWithNoAttention:
        hidden_states = () if output_hidden_states else None

        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state)

        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)

        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None)

        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state,
            hidden_states=hidden_states,
        )
class ResNetPreTrainedModel(PreTrainedModel):
    """
    An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
    models.
    """

    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights(self, module):
        if isinstance(module, nn.Conv2d):
            nn.init.kaiming_normal_(module.weight, mode="fan_out", nonlinearity="relu")
        elif isinstance(module, (nn.BatchNorm2d, nn.GroupNorm)):
            nn.init.constant_(module.weight, 1)
            nn.init.constant_(module.bias, 0)

    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, ResNetEncoder):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r'\n    This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it\n    as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n    behavior.\n\n    Parameters:\n        config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.\n            Initializing with a config file does not load the weights associated with the model, only the\n            configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
RESNET_INPUTS_DOCSTRING = r'\n    Args:\n        pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n            Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n            [`ConvNextImageProcessor.__call__`] for details.\n\n        output_hidden_states (`bool`, *optional*):\n            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for\n            more detail.\n        return_dict (`bool`, *optional*):\n            Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n'
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top.",
    RESNET_START_DOCSTRING,
)
class ResNetModel(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.config = config
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)
        self.pooler = nn.AdaptiveAvgPool2d((1, 1))
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC,
        output_type=BaseModelOutputWithPoolingAndNoAttention,
        config_class=_CONFIG_FOR_DOC,
        modality="vision",
        expected_output=_EXPECTED_OUTPUT_SHAPE,
    )
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BaseModelOutputWithPoolingAndNoAttention:
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        embedding_output = self.embedder(pixel_values)

        encoder_outputs = self.encoder(
            embedding_output, output_hidden_states=output_hidden_states, return_dict=return_dict
        )

        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state)

        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]

        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
            hidden_states=encoder_outputs.hidden_states,
        )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetForImageClassification(ResNetPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config)
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten(),
            nn.Linear(config.hidden_sizes[-1], config.num_labels) if config.num_labels > 0 else nn.Identity(),
        )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT,
        output_type=ImageClassifierOutputWithNoAttention,
        config_class=_CONFIG_FOR_DOC,
        expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
    )
    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        labels: Optional[torch.LongTensor] = None,
        output_hidden_states: Optional[bool] = None,
        return_dict: Optional[bool] = None,
    ) -> ImageClassifierOutputWithNoAttention:
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.resnet(pixel_values, output_hidden_states=output_hidden_states, return_dict=return_dict)

        pooled_output = outputs.pooler_output if return_dict else outputs[1]

        logits = self.classifier(pooled_output)

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits, labels)

        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output

        return ImageClassifierOutputWithNoAttention(loss=loss, logits=logits, hidden_states=outputs.hidden_states)
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """,
    RESNET_START_DOCSTRING,
)
class ResNetBackbone(ResNetPreTrainedModel, BackboneMixin):
    def __init__(self, config):
        super().__init__(config)
        super()._init_backbone(config)

        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config)
        self.encoder = ResNetEncoder(config)

        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING)
    @replace_return_docstrings(output_type=BackboneOutput, config_class=_CONFIG_FOR_DOC)
    def forward(
        self, pixel_values: Tensor, output_hidden_states: Optional[bool] = None, return_dict: Optional[bool] = None
    ) -> BackboneOutput:
        r"""
        Returns:
            The feature maps of the requested stages.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )

        embedding_output = self.embedder(pixel_values)

        outputs = self.encoder(embedding_output, output_hidden_states=True, return_dict=True)

        hidden_states = outputs.hidden_states

        feature_maps = ()
        for idx, stage in enumerate(self.stage_names):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)

        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output

        return BackboneOutput(
            feature_maps=feature_maps,
            hidden_states=outputs.hidden_states if output_hidden_states else None,
            attentions=None,
        )
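# Illustrative inference with the classification head above (the checkpoint name matches
# _IMAGE_CLASS_CHECKPOINT; the image path is a placeholder):
#
#     import torch
#     from PIL import Image
#     from transformers import AutoImageProcessor, ResNetForImageClassification
#
#     processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
#     model = ResNetForImageClassification.from_pretrained("microsoft/resnet-50")
#     inputs = processor(images=Image.open("cat.jpg"), return_tensors="pt")
#     with torch.no_grad():
#         logits = model(**inputs).logits
#     print(model.config.id2label[logits.argmax(-1).item()])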
| 321 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class TimmBackboneConfig(PretrainedConfig):
    model_type = "timm_backbone"

    def __init__(
        self, backbone=None, num_channels=3, features_only=True, use_pretrained_backbone=True,
        out_indices=None, **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
| 476 |
import warnings
from ...utils import logging
from .image_processing_perceiver import PerceiverImageProcessor
logger = logging.get_logger(__name__)
class PerceiverFeatureExtractor(PerceiverImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class PerceiverFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PerceiverImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 476 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
'''simple docstring'''
    def __init__(self, parent):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = EsmConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers,
            pad_token_id=1, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCAmelCase_ ( self : int ):
(
    config,
    input_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = self.prepare_config_and_inputs()
config.is_decoder = True
encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : int , _UpperCAmelCase : str , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[Any] ):
_A = TFEsmModel(config=_UpperCAmelCase )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(_UpperCAmelCase )
_A = [input_ids, input_mask]
_A = model(_UpperCAmelCase )
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : List[Any] , _UpperCAmelCase : Dict , _UpperCAmelCase : str , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Union[str, Any] , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Dict , ):
_A = True
_A = TFEsmModel(config=_UpperCAmelCase )
_A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
_A = model(_UpperCAmelCase )
_A = [input_ids, input_mask]
_A = model(_UpperCAmelCase , encoder_hidden_states=_UpperCAmelCase )
# Also check the case where encoder outputs are not passed
_A = model(_UpperCAmelCase , attention_mask=_UpperCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Optional[int] , _UpperCAmelCase : Any , _UpperCAmelCase : Dict , _UpperCAmelCase : Dict , _UpperCAmelCase : Optional[int] ):
_A = TFEsmForMaskedLM(config=_UpperCAmelCase )
_A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCAmelCase_ ( self : Optional[int] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : List[str] , _UpperCAmelCase : Tuple ):
_A = self.num_labels
_A = TFEsmForTokenClassification(config=_UpperCAmelCase )
_A = {'input_ids': input_ids, 'attention_mask': input_mask}
_A = model(_UpperCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCAmelCase_ ( self : Optional[int] ):
config_and_inputs = self.prepare_config_and_inputs()
(
    config,
    input_ids,
    input_mask,
    sequence_labels,
    token_labels,
    choice_labels,
) = config_and_inputs
inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowercase_ ( __lowerCAmelCase , __lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : List[Any] = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
UpperCAmelCase : str = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
UpperCAmelCase : List[str] = False
UpperCAmelCase : str = False
def lowerCAmelCase_ ( self : Union[str, Any] ):
_A = TFEsmModelTester(self )
_A = ConfigTester(self , config_class=_UpperCAmelCase , hidden_size=37 )
def lowerCAmelCase_ ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def lowerCAmelCase_ ( self : int ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : List[Any] ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_UpperCAmelCase )
def lowerCAmelCase_ ( self : Dict ):
_A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_UpperCAmelCase )
@slow
def lowerCAmelCase_ ( self : List[str] ):
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_A = TFEsmModel.from_pretrained(_UpperCAmelCase )
self.assertIsNotNone(_UpperCAmelCase )
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase_ ( self : str ):
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def lowerCAmelCase_ ( self : int ):
pass
def lowerCAmelCase_ ( self : Dict ):
_A , _A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_A = model_class(_UpperCAmelCase )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
_A = model.get_bias()
assert isinstance(_UpperCAmelCase , _UpperCAmelCase )
for k, v in name.items():
assert isinstance(_UpperCAmelCase , tf.Variable )
else:
_A = model.get_output_embeddings()
assert x is None
_A = model.get_bias()
assert name is None
@require_tf
class lowercase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCAmelCase_ ( self : List[str] ):
_A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 1, 2, 3, 4, 5]] )
_A = model(_UpperCAmelCase )[0]
_A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) , _UpperCAmelCase )
# compare the actual values for a slice.
_A = tf.constant(
[
[
[8.92_1518, -10.58_9814, -6.467_1307],
[-6.396_7156, -13.91_1377, -1.121_1915],
[-7.78_1247, -13.95_1557, -3.74_0592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-2 ) )
@slow
def lowerCAmelCase_ ( self : int ):
_A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
_A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
_A = model(_UpperCAmelCase )[0]
# compare the actual values for a slice.
_A = tf.constant(
[
[
[0.1444_3092, 0.5412_5327, 0.324_7739],
[0.3034_0484, 0.0052_6676, 0.3107_7722],
[0.3227_8043, -0.2498_7096, 0.341_4628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) )
| 7 |
"""simple docstring"""
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
'wmt19-ru-en': {'length_penalty': 1.1},
'wmt19-en-ru': {'length_penalty': 1.15},
'wmt19-en-de': {'length_penalty': 1.0},
'wmt19-de-en': {'length_penalty': 1.1},
# allenai:
'wmt16-en-de-dist-12-1': {'length_penalty': 0.6},
'wmt16-en-de-dist-6-1': {'length_penalty': 0.6},
'wmt16-en-de-12-1': {'length_penalty': 0.8},
'wmt19-de-en-6-6-base': {'length_penalty': 0.6},
'wmt19-de-en-6-6-big': {'length_penalty': 0.6},
}
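# Illustrative sketch (not part of the original script): how these per-model
# length penalties combine with the shared `num_beams`/`early_stopping` defaults
# that are written into the config further below. `model_dir` is a hypothetical
# example value.
def _example_generation_defaults(model_dir="wmt19-ru-en"):
    defaults = {"num_beams": 5, "early_stopping": False, "length_penalty": 1.0}
    defaults.update(best_score_hparams.get(model_dir, {}))
    return defaults  # e.g. {"num_beams": 5, "early_stopping": False, "length_penalty": 1.1}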
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
    "wmt16-en-de-dist-12-1",
    "wmt16-en-de-dist-6-1",
    "wmt16-en-de-12-1",
    "wmt19-de-en-6-6-base",
    "wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    da = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del da[f"{k}</w>"]
        da[k] = d[k]  # restore
    return da
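# Minimal usage sketch for rewrite_dict_keys (toy vocab, not a real fairseq dict):
# BPE continuation markers are stripped, word-final pieces gain "</w>", and the
# four special tokens are restored to their raw form.
_toy_vocab = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le@@": 5, "tt@@": 6, "er": 7}
assert rewrite_dict_keys(_toy_vocab) == {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3, "le": 5, "tt": 6, "er</w>": 7}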
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support tokenizer={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
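# Example invocation (hypothetical paths; the dump dir must contain the fairseq
# checkpoint together with its dicts and bpecodes):
#
#   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
#       --fsmt_checkpoint_path data/wmt19-ru-en/model.pt \
#       --pytorch_dump_folder_path data/wmt19-ru-en-converted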
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--fsmt_checkpoint_path',
default=None,
type=str,
required=True,
help=(
'Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'
' bpecodes, etc.'
),
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, required=True, help='Path to the output PyTorch model.'
)
args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
| 223 | 0 |
"""simple docstring"""
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    vae_state_dict = checkpoint

    new_checkpoint = {}

    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]

    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]

    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]

        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]

        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]

        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
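# Hedged sketch of the renaming scheme applied above: the LDM layout
# "down.{i}.block.{j}..." becomes the diffusers layout "down_blocks.{i}.resnets.{j}...".
# A toy stand-in for what renew_vae_resnet_paths + assign_to_checkpoint do together:
def _example_rename(key, old="down.0.block", new="down_blocks.0.resnets"):
    return key.replace(old, new)

assert _example_rename("encoder.down.0.block.1.conv1.weight") == "encoder.down_blocks.0.resnets.1.conv1.weight"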
def vae_pt_to_vae_diffuser(checkpoint_path, output_path):
    # Only support V1
    r = requests.get(
        "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
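# Usage sketch (hypothetical paths; fetching the v1 inference config requires
# network access):
#
#   vae_pt_to_vae_diffuser("models/vae.pt", "models/vae-diffusers")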
if __name__ == "__main__":
parser = argparse.ArgumentParser()

parser.add_argument('--vae_pt_path', default=None, type=str, required=True, help='Path to the VAE.pt to convert.')
parser.add_argument('--dump_path', default=None, type=str, required=True, help='Path to store the converted diffusers VAE.')

args = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 715 |
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '\nHuman: <<task>>\n\nAssistant: '
DEFAULT_PROMPTS_REPO = 'huggingface-tools/default-prompts'
PROMPT_FILES = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def download_prompt(prompt_or_repo_id, agent_name, mode="run"):
    """Downloads and caches the prompt from a repo, or returns the prompt itself if it is a raw prompt."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO

    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("\\s", prompt_or_repo_id) is not None:
        return prompt_or_repo_id

    prompt_file = cached_file(
        prompt_or_repo_id, PROMPT_FILES[mode], repo_type="dataset", user_agent={"agent": agent_name}
    )
    with open(prompt_file, "r", encoding="utf-8") as f:
        return f.read()
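# Behaviour sketch (hedged): anything containing whitespace is treated as a raw
# prompt and returned verbatim; a space-free string is treated as a Hub dataset
# repo id and the matching template file is fetched and cached.
#
#   download_prompt("Translate this sentence.", agent_name="MyAgent")   # returned as-is
#   download_prompt(None, agent_name="MyAgent", mode="chat")            # fetches chat_prompt_template.txt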
| 281 | 0 |
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowercase ( _UpperCamelCase , unittest.TestCase ):
UpperCamelCase = VideoToVideoSDPipeline
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
UpperCamelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
UpperCamelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
UpperCamelCase = False
# No `output_type`.
UpperCamelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def _lowercase ( self : str ) -> List[str]:
"""simple docstring"""
torch.manual_seed(0 )
UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(3_2, 6_4, 6_4, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") , up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") , cross_attention_dim=3_2 , attention_head_dim=4 , )
UpperCAmelCase = DDIMScheduler(
beta_start=0.00_085 , beta_end=0.012 , beta_schedule="""scaled_linear""" , clip_sample=__lowerCamelCase , set_alpha_to_one=__lowerCamelCase , )
torch.manual_seed(0 )
UpperCAmelCase = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=4 , sample_size=1_2_8 , )
torch.manual_seed(0 )
UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="""gelu""" , projection_dim=5_1_2 , )
UpperCAmelCase = CLIPTextModel(__lowerCamelCase )
UpperCAmelCase = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
UpperCAmelCase = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def _lowercase ( self : int , __lowerCamelCase : List[Any] , __lowerCamelCase : Any=0 ) -> Optional[Any]:
"""simple docstring"""
UpperCAmelCase = floats_tensor((1, 3, 3, 3_2, 3_2) , rng=random.Random(__lowerCamelCase ) ).to(__lowerCamelCase )
if str(__lowerCamelCase ).startswith("""mps""" ):
UpperCAmelCase = torch.manual_seed(__lowerCamelCase )
else:
UpperCAmelCase = torch.Generator(device=__lowerCamelCase ).manual_seed(__lowerCamelCase )
UpperCAmelCase = {
"prompt": "A painting of a squirrel eating a burger",
"video": video,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def _lowercase ( self : str ) -> Union[str, Any]:
"""simple docstring"""
UpperCAmelCase = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase = self.get_dummy_components()
UpperCAmelCase = VideoToVideoSDPipeline(**__lowerCamelCase )
UpperCAmelCase = sd_pipe.to(__lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=__lowerCamelCase )
UpperCAmelCase = self.get_dummy_inputs(__lowerCamelCase )
UpperCAmelCase = "np"
UpperCAmelCase = sd_pipe(**__lowerCamelCase ).frames
UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (3_2, 3_2, 3)
UpperCAmelCase = np.array([1_0_6, 1_1_7, 1_1_3, 1_7_4, 1_3_7, 1_1_2, 1_4_8, 1_5_1, 1_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() , reason="""XFormers attention is only available with CUDA and `xformers` installed""" , )
def _lowercase ( self : Any ) -> List[str]:
"""simple docstring"""
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=__lowerCamelCase , expected_max_diff=5e-3 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase ( self : int ) -> Tuple:
"""simple docstring"""
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _lowercase ( self : Union[str, Any] ) -> Any:
"""simple docstring"""
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _lowercase ( self : Dict ) -> List[str]:
"""simple docstring"""
pass
def _lowercase ( self : int ) -> str:
"""simple docstring"""
return super().test_progress_bar()
@slow
@skip_mps
class __lowercase ( unittest.TestCase ):
def _lowercase ( self : int ) -> List[Any]:
"""simple docstring"""
UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained("""cerspense/zeroscope_v2_XL""" , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
UpperCAmelCase = torch.Generator(device="""cpu""" ).manual_seed(0 )
UpperCAmelCase = torch.randn((1, 1_0, 3, 1_0_2_4, 5_7_6) , generator=__lowerCamelCase )
UpperCAmelCase = video.to("""cuda""" )
UpperCAmelCase = "Spiderman is surfing"
UpperCAmelCase = pipe(__lowerCamelCase , video=__lowerCamelCase , generator=__lowerCamelCase , num_inference_steps=3 , output_type="""pt""" ).frames
UpperCAmelCase = np.array([-1.0_458_984, -1.1_279_297, -0.9_663_086, -0.91_503_906, -0.75_097_656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1e-2
| 377 |
import importlib
import math
import os
from dataclasses import dataclass
from enum import Enum
from typing import Any, Dict, Optional, Tuple, Union
import flax
import jax.numpy as jnp
from ..utils import BaseOutput
SCHEDULER_CONFIG_NAME = "scheduler_config.json"
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 1
lowercase_ = 2
lowercase_ = 3
lowercase_ = 4
lowercase_ = 5
@dataclass
class snake_case__(_UpperCamelCase ):
"""simple docstring"""
lowercase_ = 42
class snake_case__:
"""simple docstring"""
lowercase_ = SCHEDULER_CONFIG_NAME
lowercase_ = ["""dtype"""]
lowercase_ = []
lowercase_ = True
@classmethod
def snake_case ( cls : Optional[Any] , SCREAMING_SNAKE_CASE : Dict[str, Any] = None , SCREAMING_SNAKE_CASE : Optional[str] = None , SCREAMING_SNAKE_CASE : Optional[Any]=False , **SCREAMING_SNAKE_CASE : str , ):
lowercase__ , lowercase__ : Union[str, Any] = cls.load_config(
pretrained_model_name_or_path=SCREAMING_SNAKE_CASE , subfolder=SCREAMING_SNAKE_CASE , return_unused_kwargs=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE , )
lowercase__ , lowercase__ : int = cls.from_config(SCREAMING_SNAKE_CASE , return_unused_kwargs=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
if hasattr(SCREAMING_SNAKE_CASE , "create_state" ) and getattr(SCREAMING_SNAKE_CASE , "has_state" , SCREAMING_SNAKE_CASE ):
lowercase__ : Optional[int] = scheduler.create_state()
if return_unused_kwargs:
return scheduler, state, unused_kwargs
return scheduler, state
def snake_case ( self : Tuple , SCREAMING_SNAKE_CASE : Union[str, os.PathLike] , SCREAMING_SNAKE_CASE : bool = False , **SCREAMING_SNAKE_CASE : Optional[int] ):
self.save_config(save_directory=SCREAMING_SNAKE_CASE , push_to_hub=SCREAMING_SNAKE_CASE , **SCREAMING_SNAKE_CASE )
@property
def snake_case ( self : int ):
return self._get_compatibles()
@classmethod
def snake_case ( cls : Tuple ):
lowercase__ : Optional[int] = list(set([cls.__name__] + cls._compatibles ) )
lowercase__ : List[str] = importlib.import_module(__name__.split("." )[0] )
lowercase__ : Tuple = [
getattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) for c in compatible_classes_str if hasattr(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE )
]
return compatible_classes
def broadcast_to_shape_from_left(x, shape):
    assert len(shape) >= x.ndim
    return jnp.broadcast_to(x.reshape(x.shape + (1,) * (len(shape) - x.ndim)), shape)
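# Shape sketch: a per-example scalar of shape (B,) is right-padded with
# singleton axes so it broadcasts against a (B, H, W, C) batch.
assert broadcast_to_shape_from_left(jnp.ones((4,)), (4, 32, 32, 3)).shape == (4, 32, 32, 3)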
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, dtype=jnp.float32):
    def alpha_bar(time_step):
        return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta))
    return jnp.array(betas, dtype=dtype)
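# Quick numeric check of the cosine schedule above: betas are positive, capped
# at max_beta, and grow toward the end of the trajectory.
_betas = betas_for_alpha_bar(10)
assert _betas.shape == (10,) and float(_betas.min()) > 0 and float(_betas.max()) <= 0.999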
@flax.struct.dataclass
class snake_case__:
"""simple docstring"""
lowercase_ = 42
lowercase_ = 42
lowercase_ = 42
@classmethod
def snake_case ( cls : Union[str, Any] , SCREAMING_SNAKE_CASE : List[str] ):
lowercase__ : List[Any] = scheduler.config
if config.trained_betas is not None:
lowercase__ : List[str] = jnp.asarray(config.trained_betas , dtype=scheduler.dtype )
elif config.beta_schedule == "linear":
lowercase__ : Optional[int] = jnp.linspace(config.beta_start , config.beta_end , config.num_train_timesteps , dtype=scheduler.dtype )
elif config.beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
lowercase__ : List[str] = (
jnp.linspace(
config.beta_start**0.5 , config.beta_end**0.5 , config.num_train_timesteps , dtype=scheduler.dtype )
** 2
)
elif config.beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
lowercase__ : Any = betas_for_alpha_bar(config.num_train_timesteps , dtype=scheduler.dtype )
else:
raise NotImplementedError(
f"""beta_schedule {config.beta_schedule} is not implemented for scheduler {scheduler.__class__.__name__}""" )
lowercase__ : Dict = 1.0 - betas
lowercase__ : List[Any] = jnp.cumprod(SCREAMING_SNAKE_CASE , axis=0 )
return cls(
alphas=SCREAMING_SNAKE_CASE , betas=SCREAMING_SNAKE_CASE , alphas_cumprod=SCREAMING_SNAKE_CASE , )
def get_sqrt_alpha_prod(state, original_samples, noise, timesteps):
    alphas_cumprod = state.alphas_cumprod

    sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
    sqrt_alpha_prod = sqrt_alpha_prod.flatten()
    sqrt_alpha_prod = broadcast_to_shape_from_left(sqrt_alpha_prod, original_samples.shape)

    sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
    sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
    sqrt_one_minus_alpha_prod = broadcast_to_shape_from_left(sqrt_one_minus_alpha_prod, original_samples.shape)

    return sqrt_alpha_prod, sqrt_one_minus_alpha_prod
def add_noise_common(state, original_samples, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, original_samples, noise, timesteps)
    noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
    return noisy_samples
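# The closed-form forward-diffusion step implemented above, written out:
#   x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps,  eps ~ N(0, I)
# i.e. an interpolation between the clean sample and pure noise as t grows.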
def get_velocity_common(state, sample, noise, timesteps):
    sqrt_alpha_prod, sqrt_one_minus_alpha_prod = get_sqrt_alpha_prod(state, sample, noise, timesteps)
    velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
    return velocity
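# v-prediction target (Salimans & Ho, 2022, "Progressive Distillation"):
#   v_t = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
# which is what the helper above computes with (sample, noise) = (x_0, eps).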
| 496 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class UpperCamelCase__ ( lowercase__ , lowercase__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[str] = [e.name for e in KarrasDiffusionSchedulers]
SCREAMING_SNAKE_CASE__ : Any = 2
@register_to_config
def __init__( self , snake_case = 1_0_0_0 , snake_case = 0.0_0085 , snake_case = 0.012 , snake_case = "linear" , snake_case = None , snake_case = "epsilon" , snake_case = False , snake_case = False , snake_case = 1.0 , snake_case = "linspace" , snake_case = 0 , ):
'''simple docstring'''
if trained_betas is not None:
UpperCAmelCase : List[str] = torch.tensor(snake_case , dtype=torch.floataa )
elif beta_schedule == "linear":
UpperCAmelCase : Tuple = torch.linspace(snake_case , snake_case , snake_case , dtype=torch.floataa )
elif beta_schedule == "scaled_linear":
# this schedule is very specific to the latent diffusion model.
UpperCAmelCase : Dict = (
torch.linspace(beta_start**0.5 , beta_end**0.5 , snake_case , dtype=torch.floataa ) ** 2
)
elif beta_schedule == "squaredcos_cap_v2":
# Glide cosine schedule
UpperCAmelCase : str = betas_for_alpha_bar(snake_case , alpha_transform_type="cosine" )
elif beta_schedule == "exp":
UpperCAmelCase : Optional[Any] = betas_for_alpha_bar(snake_case , alpha_transform_type="exp" )
else:
raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}" )
UpperCAmelCase : Optional[int] = 1.0 - self.betas
UpperCAmelCase : Optional[Any] = torch.cumprod(self.alphas , dim=0 )
# set all values
self.set_timesteps(snake_case , snake_case , snake_case )
UpperCAmelCase : Dict = use_karras_sigmas
def A_ ( self , snake_case , snake_case=None ):
'''simple docstring'''
if schedule_timesteps is None:
UpperCAmelCase : Dict = self.timesteps
UpperCAmelCase : Optional[Any] = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
UpperCAmelCase : Optional[Any] = 1 if len(snake_case ) > 1 else 0
else:
UpperCAmelCase : Tuple = timestep.cpu().item() if torch.is_tensor(snake_case ) else timestep
UpperCAmelCase : Optional[Any] = self._index_counter[timestep_int]
return indices[pos].item()
@property
def A_ ( self ):
'''simple docstring'''
if self.config.timestep_spacing in ["linspace", "trailing"]:
return self.sigmas.max()
return (self.sigmas.max() ** 2 + 1) ** 0.5
def A_ ( self , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.index_for_timestep(snake_case )
UpperCAmelCase : Tuple = self.sigmas[step_index]
UpperCAmelCase : Optional[int] = sample / ((sigma**2 + 1) ** 0.5)
return sample
def A_ ( self , snake_case , snake_case = None , snake_case = None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = num_inference_steps
UpperCAmelCase : int = num_train_timesteps or self.config.num_train_timesteps
# "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
if self.config.timestep_spacing == "linspace":
UpperCAmelCase : Union[str, Any] = np.linspace(0 , num_train_timesteps - 1 , snake_case , dtype=snake_case )[::-1].copy()
elif self.config.timestep_spacing == "leading":
UpperCAmelCase : Optional[Any] = num_train_timesteps // self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase : Any = (np.arange(0 , snake_case ) * step_ratio).round()[::-1].copy().astype(snake_case )
timesteps += self.config.steps_offset
elif self.config.timestep_spacing == "trailing":
UpperCAmelCase : Optional[Any] = num_train_timesteps / self.num_inference_steps
# creates integer timesteps by multiplying by ratio
# casting to int to avoid issues when num_inference_step is power of 3
UpperCAmelCase : Optional[Any] = (np.arange(snake_case , 0 , -step_ratio )).round().copy().astype(snake_case )
timesteps -= 1
else:
raise ValueError(
f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." )
UpperCAmelCase : List[Any] = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5 )
UpperCAmelCase : Union[str, Any] = np.log(snake_case )
UpperCAmelCase : Optional[int] = np.interp(snake_case , np.arange(0 , len(snake_case ) ) , snake_case )
if self.config.use_karras_sigmas:
UpperCAmelCase : Optional[int] = self._convert_to_karras(in_sigmas=snake_case , num_inference_steps=self.num_inference_steps )
UpperCAmelCase : str = np.array([self._sigma_to_t(snake_case , snake_case ) for sigma in sigmas] )
UpperCAmelCase : Optional[Any] = np.concatenate([sigmas, [0.0]] ).astype(np.floataa )
UpperCAmelCase : List[Any] = torch.from_numpy(snake_case ).to(device=snake_case )
UpperCAmelCase : Optional[int] = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2 ), sigmas[-1:]] )
UpperCAmelCase : Union[str, Any] = torch.from_numpy(snake_case )
UpperCAmelCase : Any = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2 )] )
if str(snake_case ).startswith("mps" ):
# mps does not support float64
UpperCAmelCase : int = timesteps.to(snake_case , dtype=torch.floataa )
else:
UpperCAmelCase : Optional[Any] = timesteps.to(device=snake_case )
# empty dt and derivative
UpperCAmelCase : List[Any] = None
UpperCAmelCase : int = None
# for exp beta schedules, such as the one for `pipeline_shap_e.py`
# we need an index counter
UpperCAmelCase : List[str] = defaultdict(snake_case )
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : str = np.log(snake_case )
# get distribution
UpperCAmelCase : Any = log_sigma - log_sigmas[:, np.newaxis]
# get sigmas range
UpperCAmelCase : List[str] = np.cumsum((dists >= 0) , axis=0 ).argmax(axis=0 ).clip(max=log_sigmas.shape[0] - 2 )
UpperCAmelCase : Dict = low_idx + 1
UpperCAmelCase : str = log_sigmas[low_idx]
UpperCAmelCase : List[str] = log_sigmas[high_idx]
# interpolate sigmas
UpperCAmelCase : Tuple = (low - log_sigma) / (low - high)
UpperCAmelCase : str = np.clip(snake_case , 0 , 1 )
# transform interpolation to time range
UpperCAmelCase : str = (1 - w) * low_idx + w * high_idx
UpperCAmelCase : Dict = t.reshape(sigma.shape )
return t
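# The interpolation above, in equation form: with l = log(sigma) and l_i = log(sigma_i),
#   w = clip((l_lo - l) / (l_lo - l_hi), 0, 1),   t = (1 - w) * i_lo + w * i_hi
# so a continuous timestep is recovered from a sigma by linear interpolation in log-sigma space.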
def A_ ( self , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : float = in_sigmas[-1].item()
UpperCAmelCase : float = in_sigmas[0].item()
UpperCAmelCase : Dict = 7.0 # 7.0 is the value used in the paper
UpperCAmelCase : Optional[int] = np.linspace(0 , 1 , snake_case )
UpperCAmelCase : Tuple = sigma_min ** (1 / rho)
UpperCAmelCase : int = sigma_max ** (1 / rho)
UpperCAmelCase : Tuple = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
return sigmas
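# Karras et al. (2022) noise schedule computed above, written out:
#   sigma_i = (sigma_max^(1/rho) + ramp_i * (sigma_min^(1/rho) - sigma_max^(1/rho)))^rho,  ramp_i in [0, 1]
# with rho = 7.0, which spaces the sigmas more densely near sigma_min.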
@property
def A_ ( self ):
'''simple docstring'''
return self.dt is None
def A_ ( self , snake_case , snake_case , snake_case , snake_case = True , ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.index_for_timestep(snake_case )
# advance index counter by 1
UpperCAmelCase : List[str] = timestep.cpu().item() if torch.is_tensor(snake_case ) else timestep
self._index_counter[timestep_int] += 1
if self.state_in_first_order:
UpperCAmelCase : int = self.sigmas[step_index]
UpperCAmelCase : Union[str, Any] = self.sigmas[step_index + 1]
else:
# 2nd order / Heun's method
UpperCAmelCase : Tuple = self.sigmas[step_index - 1]
UpperCAmelCase : Tuple = self.sigmas[step_index]
# currently only gamma=0 is supported. This usually works best anyways.
# We can support gamma in the future but then need to scale the timestep before
# passing it to the model which requires a change in API
UpperCAmelCase : Any = 0
UpperCAmelCase : List[Any] = sigma * (gamma + 1) # Note: sigma_hat == sigma for now
# 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
if self.config.prediction_type == "epsilon":
UpperCAmelCase : Optional[int] = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase : Any = sample - sigma_input * model_output
elif self.config.prediction_type == "v_prediction":
UpperCAmelCase : str = sigma_hat if self.state_in_first_order else sigma_next
UpperCAmelCase : str = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
sample / (sigma_input**2 + 1)
)
elif self.config.prediction_type == "sample":
UpperCAmelCase : int = model_output
else:
raise ValueError(
f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" )
if self.config.clip_sample:
UpperCAmelCase : List[Any] = pred_original_sample.clamp(
-self.config.clip_sample_range , self.config.clip_sample_range )
if self.state_in_first_order:
# 2. Convert to an ODE derivative for 1st order
UpperCAmelCase : List[Any] = (sample - pred_original_sample) / sigma_hat
# 3. delta timestep
UpperCAmelCase : List[str] = sigma_next - sigma_hat
# store for 2nd order step
UpperCAmelCase : Dict = derivative
UpperCAmelCase : Optional[int] = dt
UpperCAmelCase : Optional[Any] = sample
else:
# 2. 2nd order / Heun's method
UpperCAmelCase : Union[str, Any] = (sample - pred_original_sample) / sigma_next
UpperCAmelCase : Optional[Any] = (self.prev_derivative + derivative) / 2
# 3. take prev timestep & sample
UpperCAmelCase : Dict = self.dt
UpperCAmelCase : Tuple = self.sample
# free dt and derivative
# Note, this puts the scheduler in "first order mode"
UpperCAmelCase : List[Any] = None
UpperCAmelCase : Union[str, Any] = None
UpperCAmelCase : str = None
UpperCAmelCase : Any = sample + derivative * dt
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=snake_case )
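# Heun's method as implemented by the two branches above, written out (illustrative):
# a first Euler step
#   x' = x + dt * d(x, sigma_hat)
# followed by a correction that averages the slopes at both ends of the interval
#   x_next = x + dt * (d(x, sigma_hat) + d(x', sigma_next)) / 2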
def A_ ( self , snake_case , snake_case , snake_case , ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.sigmas.to(device=original_samples.device , dtype=original_samples.dtype )
if original_samples.device.type == "mps" and torch.is_floating_point(snake_case ):
# mps does not support float64
UpperCAmelCase : List[Any] = self.timesteps.to(original_samples.device , dtype=torch.floataa )
UpperCAmelCase : Dict = timesteps.to(original_samples.device , dtype=torch.floataa )
else:
UpperCAmelCase : Optional[Any] = self.timesteps.to(original_samples.device )
UpperCAmelCase : Any = timesteps.to(original_samples.device )
UpperCAmelCase : Optional[Any] = [self.index_for_timestep(snake_case , snake_case ) for t in timesteps]
UpperCAmelCase : List[str] = sigmas[step_indices].flatten()
while len(sigma.shape ) < len(original_samples.shape ):
UpperCAmelCase : Tuple = sigma.unsqueeze(-1 )
UpperCAmelCase : Any = original_samples + noise * sigma
return noisy_samples
def __len__( self ):
'''simple docstring'''
return self.config.num_train_timesteps
| 609 |
'''simple docstring'''
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class UpperCamelCase__ ( lowercase__ ):
"""simple docstring"""
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(snake_case , "num_attention_heads" ) )
self.parent.assertTrue(hasattr(snake_case , "num_encoder_blocks" ) )
class UpperCamelCase__ :
"""simple docstring"""
def __init__( self , snake_case , snake_case=1_3 , snake_case=6_4 , snake_case=3 , snake_case=4 , snake_case=[2, 2, 2, 2] , snake_case=[8, 4, 2, 1] , snake_case=[1_6, 3_2, 6_4, 1_2_8] , snake_case=[1, 4, 8, 1_6] , snake_case=[1, 2, 4, 8] , snake_case=True , snake_case=True , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=0.02 , snake_case=3 , snake_case=None , ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = parent
UpperCAmelCase : str = batch_size
UpperCAmelCase : Dict = image_size
UpperCAmelCase : int = num_channels
UpperCAmelCase : List[Any] = num_encoder_blocks
UpperCAmelCase : Dict = sr_ratios
UpperCAmelCase : Union[str, Any] = depths
UpperCAmelCase : Optional[Any] = hidden_sizes
UpperCAmelCase : Union[str, Any] = downsampling_rates
UpperCAmelCase : Tuple = num_attention_heads
UpperCAmelCase : Tuple = is_training
UpperCAmelCase : Any = use_labels
UpperCAmelCase : str = hidden_act
UpperCAmelCase : Union[str, Any] = hidden_dropout_prob
UpperCAmelCase : Any = attention_probs_dropout_prob
UpperCAmelCase : int = initializer_range
UpperCAmelCase : List[str] = num_labels
UpperCAmelCase : Optional[Any] = scope
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Any = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase : Any = None
if self.use_labels:
UpperCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
UpperCAmelCase : int = self.get_config()
return config, pixel_values, labels
def A_ ( self ):
'''simple docstring'''
return SegformerConfig(
image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = SegformerModel(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Optional[int] = model(snake_case )
UpperCAmelCase : List[str] = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : Dict = self.num_labels
UpperCAmelCase : Optional[Any] = SegformerForSemanticSegmentation(snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : str = model(snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
UpperCAmelCase : Dict = model(snake_case , labels=snake_case )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self , snake_case , snake_case , snake_case ):
'''simple docstring'''
UpperCAmelCase : List[str] = 1
UpperCAmelCase : List[Any] = SegformerForSemanticSegmentation(config=snake_case )
model.to(snake_case )
model.eval()
UpperCAmelCase : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(snake_case )
UpperCAmelCase : Optional[int] = model(snake_case , labels=snake_case )
self.parent.assertGreater(result.loss , 0.0 )
def A_ ( self ):
'''simple docstring'''
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( lowercase__ , lowercase__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Any = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE__ : int = (
{
"feature-extraction": SegformerModel,
"image-classification": SegformerForImageClassification,
"image-segmentation": SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ : Any = True
SCREAMING_SNAKE_CASE__ : List[str] = False
SCREAMING_SNAKE_CASE__ : Union[str, Any] = False
SCREAMING_SNAKE_CASE__ : int = False
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Optional[Any] = SegformerModelTester(self )
UpperCAmelCase : Dict = SegformerConfigTester(self , config_class=snake_case )
def A_ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*snake_case )
@unittest.skip("SegFormer does not use inputs_embeds" )
def A_ ( self ):
'''simple docstring'''
pass
@unittest.skip("SegFormer does not have get_input_embeddings method and get_output_embeddings methods" )
def A_ ( self ):
'''simple docstring'''
pass
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : List[Any] = model_class(snake_case )
UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase : Optional[int] = [*signature.parameters.keys()]
UpperCAmelCase : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , snake_case )
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase , UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Tuple = True
for model_class in self.all_model_classes:
UpperCAmelCase : Tuple = True
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Union[str, Any] = True
UpperCAmelCase : Dict = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase : str = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCAmelCase : int = outputs.attentions
UpperCAmelCase : Union[str, Any] = sum(self.model_tester.depths )
self.assertEqual(len(snake_case ) , snake_case )
# check that output_attentions also works when set via the config
del inputs_dict["output_attentions"]
UpperCAmelCase : Any = True
UpperCAmelCase : int = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase : Union[str, Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
# verify the first attentions (first block, first layer)
UpperCAmelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase : Dict = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
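# Worked example of the two lengths above (illustrative numbers, assuming
# image_size=64 and sr_ratios[0]=8): the first block attends over (64 // 4) ** 2 = 256
# query tokens, while spatial-reduction attention shrinks the key/value sequence to
# (64 // (4 * 8)) ** 2 = 4 tokens.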
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
# verify the last attentions (last block, last layer)
UpperCAmelCase : List[str] = (self.model_tester.image_size // 3_2) ** 2
UpperCAmelCase : int = (self.model_tester.image_size // (3_2 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
UpperCAmelCase : Any = len(snake_case )
# Check attention is always last and order is fine
UpperCAmelCase : Dict = True
UpperCAmelCase : int = True
UpperCAmelCase : Optional[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase : Optional[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 1 , len(snake_case ) )
UpperCAmelCase : int = outputs.attentions
self.assertEqual(len(snake_case ) , snake_case )
# verify the first attentions (first block, first layer)
UpperCAmelCase : Optional[int] = (self.model_tester.image_size // 4) ** 2
UpperCAmelCase : str = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
def A_ ( self ):
'''simple docstring'''
def check_hidden_states_output(snake_case , snake_case , snake_case ):
UpperCAmelCase : Optional[Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
UpperCAmelCase : Dict = model(**self._prepare_for_class(snake_case , snake_case ) )
UpperCAmelCase : List[str] = outputs.hidden_states
UpperCAmelCase : Optional[Any] = self.model_tester.num_encoder_blocks
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
UpperCAmelCase , UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase : Dict = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also works when set via the config
del inputs_dict["output_hidden_states"]
UpperCAmelCase : Union[str, Any] = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def A_ ( self ):
'''simple docstring'''
if not self.model_tester.is_training:
return
UpperCAmelCase , UpperCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase : Dict = True
for model_class in self.all_model_classes:
if model_class in get_values(snake_case ):
continue
UpperCAmelCase : Optional[int] = model_class(snake_case )
model.to(snake_case )
model.train()
UpperCAmelCase : List[Any] = self._prepare_for_class(snake_case , snake_case , return_labels=snake_case )
UpperCAmelCase : Tuple = model(**snake_case ).loss
loss.backward()
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A_ ( self ):
'''simple docstring'''
pass
@slow
def A_ ( self ):
'''simple docstring'''
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase : Any = SegformerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowercase ( ):
'''simple docstring'''
UpperCAmelCase : List[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : List[str] = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
UpperCAmelCase : Any = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
snake_case )
UpperCAmelCase : Optional[int] = prepare_img()
UpperCAmelCase : List[Any] = image_processor(images=snake_case , return_tensors="pt" )
UpperCAmelCase : List[Any] = encoded_inputs.pixel_values.to(snake_case )
with torch.no_grad():
UpperCAmelCase : List[Any] = model(snake_case )
UpperCAmelCase : int = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCAmelCase : List[str] = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1e-4 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : Tuple = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
UpperCAmelCase : Dict = SegformerForSemanticSegmentation.from_pretrained(
"nvidia/segformer-b1-finetuned-cityscapes-1024-1024" ).to(snake_case )
UpperCAmelCase : Any = prepare_img()
UpperCAmelCase : Tuple = image_processor(images=snake_case , return_tensors="pt" )
UpperCAmelCase : Optional[Any] = encoded_inputs.pixel_values.to(snake_case )
with torch.no_grad():
UpperCAmelCase : int = model(snake_case )
UpperCAmelCase : List[Any] = torch.Size((1, model.config.num_labels, 1_2_8, 1_2_8) )
self.assertEqual(outputs.logits.shape , snake_case )
UpperCAmelCase : int = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , snake_case , atol=1e-1 ) )
@slow
def A_ ( self ):
'''simple docstring'''
UpperCAmelCase : str = SegformerImageProcessor(
image_scale=(5_1_2, 5_1_2) , keep_ratio=snake_case , align=snake_case , do_random_crop=snake_case )
UpperCAmelCase : Union[str, Any] = SegformerForSemanticSegmentation.from_pretrained("nvidia/segformer-b0-finetuned-ade-512-512" ).to(
snake_case )
UpperCAmelCase : List[str] = prepare_img()
UpperCAmelCase : Any = image_processor(images=snake_case , return_tensors="pt" )
UpperCAmelCase : List[str] = encoded_inputs.pixel_values.to(snake_case )
with torch.no_grad():
UpperCAmelCase : str = model(snake_case )
UpperCAmelCase : int = outputs.logits.detach().cpu()
UpperCAmelCase : Any = image_processor.post_process_semantic_segmentation(outputs=snake_case , target_sizes=[(5_0_0, 3_0_0)] )
UpperCAmelCase : Tuple = torch.Size((5_0_0, 3_0_0) )
self.assertEqual(segmentation[0].shape , snake_case )
UpperCAmelCase : Tuple = image_processor.post_process_semantic_segmentation(outputs=snake_case )
UpperCAmelCase : List[str] = torch.Size((1_2_8, 1_2_8) )
self.assertEqual(segmentation[0].shape , snake_case )
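# Sanity check of the shapes above: SegFormer emits logits at 1/4 of the input
# resolution (512 -> 128), so post_process_semantic_segmentation returns a
# 128x128 map by default and only resizes (here to (500, 300)) when
# `target_sizes` is passed.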
| 609 | 1 |
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
_lowerCamelCase : Optional[Any] = {
"169M": 1_2,
"430M": 2_4,
"1B5": 2_4,
"3B": 3_2,
"7B": 3_2,
"14B": 4_0,
}
_lowerCamelCase : int = {
"169M": 7_6_8,
"430M": 1_0_2_4,
"1B5": 2_0_4_8,
"3B": 2_5_6_0,
"7B": 4_0_9_6,
"14B": 5_1_2_0,
}
def _UpperCAmelCase (UpperCamelCase_ : Optional[Any] ):
'''simple docstring'''
_lowerCAmelCase : str = list(state_dict.keys() )
for name in state_dict_keys:
_lowerCAmelCase : str = state_dict.pop(UpperCamelCase_ )
# emb -> embedding
if name.startswith("""emb.""" ):
_lowerCAmelCase : str = name.replace("""emb.""" , """embeddings.""" )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith("""blocks.0.ln0""" ):
_lowerCAmelCase : Tuple = name.replace("""blocks.0.ln0""" , """blocks.0.pre_ln""" )
# att -> attention
_lowerCAmelCase : Dict = re.sub(R"""blocks\.(\d+)\.att""" , R"""blocks.\1.attention""" , UpperCamelCase_ )
# ffn -> feed_forward
_lowerCAmelCase : List[Any] = re.sub(R"""blocks\.(\d+)\.ffn""" , R"""blocks.\1.feed_forward""" , UpperCamelCase_ )
# time_mix_k -> time_mix_key
if name.endswith(""".time_mix_k""" ):
_lowerCAmelCase : Dict = name.replace(""".time_mix_k""" , """.time_mix_key""" )
# time_mix_v -> time_mix_value
if name.endswith(""".time_mix_v""" ):
_lowerCAmelCase : Any = name.replace(""".time_mix_v""" , """.time_mix_value""" )
# time_mix_r -> time_mix_receptance
if name.endswith(""".time_mix_r""" ):
_lowerCAmelCase : List[str] = name.replace(""".time_mix_r""" , """.time_mix_receptance""" )
if name != "head.weight":
_lowerCAmelCase : Optional[int] = """rwkv.""" + name
_lowerCAmelCase : List[Any] = weight
return state_dict
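# Illustrative rename trace (not part of the original script): applying the rules
# above to a raw RWKV key such as "blocks.3.att.key.weight" yields
# "rwkv.blocks.3.attention.key.weight", and "emb.weight" becomes
# "rwkv.embeddings.weight".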
def _UpperCAmelCase (UpperCamelCase_ : Dict , UpperCamelCase_ : Union[str, Any] , UpperCamelCase_ : str , UpperCamelCase_ : Optional[int]=None , UpperCamelCase_ : Dict=None , UpperCamelCase_ : Tuple=False , UpperCamelCase_ : int=None ):
'''simple docstring'''
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print("""No `--tokenizer_file` provided, we will use the default tokenizer.""" )
_lowerCAmelCase : List[str] = 50277
_lowerCAmelCase : Any = AutoTokenizer.from_pretrained("""EleutherAI/gpt-neox-20b""" )
else:
_lowerCAmelCase : int = PreTrainedTokenizerFast(tokenizer_file=UpperCamelCase_ )
_lowerCAmelCase : int = len(UpperCamelCase_ )
tokenizer.save_pretrained(UpperCamelCase_ )
# 2. Build the config
_lowerCAmelCase : List[str] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
_lowerCAmelCase : Any = candidate
break
if size is None:
raise ValueError("""Could not infer the size, please provide it with the `--size` argument.""" )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
_lowerCAmelCase : List[Any] = RwkvConfig(
vocab_size=UpperCamelCase_ , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDEN_SIZE_MAPPING[size] , )
config.save_pretrained(UpperCamelCase_ )
# 3. Download model file then convert state_dict
_lowerCAmelCase : Optional[Any] = hf_hub_download(UpperCamelCase_ , UpperCamelCase_ )
_lowerCAmelCase : Optional[Any] = torch.load(UpperCamelCase_ , map_location="""cpu""" )
_lowerCAmelCase : Any = convert_state_dict(UpperCamelCase_ )
# 4. Split in shards and save
_lowerCAmelCase , _lowerCAmelCase : Dict = shard_checkpoint(UpperCamelCase_ )
for shard_file, shard in shards.items():
torch.save(UpperCamelCase_ , os.path.join(UpperCamelCase_ , UpperCamelCase_ ) )
if index is not None:
_lowerCAmelCase : Tuple = os.path.join(UpperCamelCase_ , UpperCamelCase_ )
# Save the index as well
with open(UpperCamelCase_ , """w""" , encoding="""utf-8""" ) as f:
_lowerCAmelCase : Optional[Any] = json.dumps(UpperCamelCase_ , indent=2 , sort_keys=UpperCamelCase_ ) + """\n"""
f.write(UpperCamelCase_ )
# 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
print(
"""Cleaning up shards. This may fail with an OOM error; if this is the case, don't worry, you have still converted the model.""" )
_lowerCAmelCase : Union[str, Any] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
_lowerCAmelCase : int = torch.load(os.path.join(UpperCamelCase_ , UpperCamelCase_ ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(UpperCamelCase_ , UpperCamelCase_ ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError("""Please provide a `model_name` to push the model to the Hub.""" )
_lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(UpperCamelCase_ )
model.push_to_hub(UpperCamelCase_ , max_shard_size="""2GB""" )
tokenizer.push_to_hub(UpperCamelCase_ )
if __name__ == "__main__":
_lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--repo_id", default=None, type=str, required=True, help="Repo ID from which to pull the checkpoint."
)
parser.add_argument(
"--checkpoint_file", default=None, type=str, required=True, help="Name of the checkpoint file in the repo."
)
parser.add_argument(
"--output_dir", default=None, type=str, required=True, help="Where to save the converted model."
)
parser.add_argument(
"--tokenizer_file",
default=None,
type=str,
help="Path to the tokenizer file to use (if not provided, only the model is converted).",
)
parser.add_argument(
"--size",
default=None,
type=str,
help="Size of the model. Will be inferred from the `checkpoint_file` if not passed.",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Push to the Hub the converted model.",
)
parser.add_argument(
"--model_name",
default=None,
type=str,
help="Name of the pushed model on the Hub, including the username / organization.",
)
_lowerCamelCase : Tuple = parser.parse_args()
convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
)
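# Example invocation (illustrative repo/file names, not from the original script):
# python convert_rwkv_checkpoint_to_hf.py --repo_id BlinkDL/rwkv-4-pile-169m \
#     --checkpoint_file RWKV-4-Pile-169M-20220807-8023.pth --output_dir ./rwkv-169m --size 169M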
| 429 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..models.auto import AutoProcessor
from ..models.vision_encoder_decoder import VisionEncoderDecoderModel
from ..utils import is_vision_available
from .base import PipelineTool
if is_vision_available():
from PIL import Image
class __snake_case (_a ):
lowerCAmelCase__ = "naver-clova-ix/donut-base-finetuned-docvqa"
lowerCAmelCase__ = (
"This is a tool that answers a question about an document (pdf). It takes an input named `document` which "
"should be the document containing the information, as well as a `question` that is the question about the "
"document. It returns a text that contains the answer to the question."
)
lowerCAmelCase__ = "document_qa"
lowerCAmelCase__ = AutoProcessor
lowerCAmelCase__ = VisionEncoderDecoderModel
lowerCAmelCase__ = ["image", "text"]
lowerCAmelCase__ = ["text"]
def __init__( self : str , *_UpperCAmelCase : Optional[Any] , **_UpperCAmelCase : List[Any] ) -> Optional[Any]:
'''simple docstring'''
if not is_vision_available():
raise ValueError("""Pillow must be installed to use the DocumentQuestionAnsweringTool.""" )
super().__init__(*_UpperCAmelCase , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : "Image" , _UpperCAmelCase : str ) -> Optional[Any]:
'''simple docstring'''
_lowerCAmelCase : List[str] = """<s_docvqa><s_question>{user_input}</s_question><s_answer>"""
_lowerCAmelCase : Dict = task_prompt.replace("""{user_input}""" , _UpperCAmelCase )
_lowerCAmelCase : str = self.pre_processor.tokenizer(
_UpperCAmelCase , add_special_tokens=_UpperCAmelCase , return_tensors="""pt""" ).input_ids
_lowerCAmelCase : Dict = self.pre_processor(_UpperCAmelCase , return_tensors="""pt""" ).pixel_values
return {"decoder_input_ids": decoder_input_ids, "pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE ( self : Dict , _UpperCAmelCase : Union[str, Any] ) -> Optional[int]:
'''simple docstring'''
return self.model.generate(
inputs["""pixel_values"""].to(self.device ) , decoder_input_ids=inputs["""decoder_input_ids"""].to(self.device ) , max_length=self.model.decoder.config.max_position_embeddings , early_stopping=_UpperCAmelCase , pad_token_id=self.pre_processor.tokenizer.pad_token_id , eos_token_id=self.pre_processor.tokenizer.eos_token_id , use_cache=_UpperCAmelCase , num_beams=1 , bad_words_ids=[[self.pre_processor.tokenizer.unk_token_id]] , return_dict_in_generate=_UpperCAmelCase , ).sequences
def SCREAMING_SNAKE_CASE ( self : str , _UpperCAmelCase : Tuple ) -> List[str]:
'''simple docstring'''
_lowerCAmelCase : Dict = self.pre_processor.batch_decode(_UpperCAmelCase )[0]
_lowerCAmelCase : Optional[Any] = sequence.replace(self.pre_processor.tokenizer.eos_token , """""" )
_lowerCAmelCase : Union[str, Any] = sequence.replace(self.pre_processor.tokenizer.pad_token , """""" )
_lowerCAmelCase : List[Any] = re.sub(R"""<.*?>""" , """""" , _UpperCAmelCase , count=1 ).strip() # remove first task start token
_lowerCAmelCase : Tuple = self.pre_processor.token2json(_UpperCAmelCase )
return sequence["answer"]
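# Minimal usage sketch (assumed names; upstream calls this class
# DocumentQuestionAnsweringTool, and PipelineTool instances are callable):
# tool = DocumentQuestionAnsweringTool()
# answer = tool(document, "What is the purchase amount?")  # document: a PIL.Image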
| 429 | 1 |
from __future__ import annotations
from math import pi
def __A ( _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('''One and only one argument must be 0''' )
if inductance < 0:
raise ValueError('''Inductance cannot be negative''' )
if frequency < 0:
raise ValueError('''Frequency cannot be negative''' )
if reactance < 0:
raise ValueError('''Inductive reactance cannot be negative''' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('''Exactly one argument must be 0''' )
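# Worked example (illustrative values; the upstream signature is
# (inductance, frequency, reactance)): for L = 0.035 H and f = 1_0_0_0 Hz,
# X_L = 2 * pi * f * L ~= 219.91 ohms, i.e. __A(0.035, 1_0_0_0, 0)
# returns {"reactance": 219.911...}.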
if __name__ == "__main__":
import doctest
doctest.testmod()
| 62 |
import os
from pathlib import Path
from unittest.mock import patch
import pytest
import zstandard as zstd
from datasets.download.download_config import DownloadConfig
from datasets.utils.file_utils import (
OfflineModeIsEnabled,
cached_path,
fsspec_get,
fsspec_head,
ftp_get,
ftp_head,
get_from_cache,
http_get,
http_head,
)
__A = '\\n Text data.\n Second line of data.'
__A = 'file'
@pytest.fixture(scope='''session''' )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / (FILE_PATH + '''.zstd''')
_A = bytes(_lowercase , '''utf-8''' )
with zstd.open(_lowercase , '''wb''' ) as f:
f.write(_lowercase )
return path
@pytest.fixture
def __A ( _lowercase ):
'''simple docstring'''
with open(os.path.join(tmpfs.local_root_dir , _lowercase ) , '''w''' ) as f:
f.write(_lowercase )
return FILE_PATH
@pytest.mark.parametrize('''compression_format''' , ['''gzip''', '''xz''', '''zstd'''] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = {'''gzip''': gz_file, '''xz''': xz_file, '''zstd''': zstd_path}
_A = input_paths[compression_format]
_A = tmp_path / '''cache'''
_A = DownloadConfig(cache_dir=_lowercase , extract_compressed_file=_lowercase )
_A = cached_path(_lowercase , download_config=_lowercase )
with open(_lowercase ) as f:
_A = f.read()
with open(_lowercase ) as f:
_A = f.read()
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize('''default_extracted''' , [True, False] )
@pytest.mark.parametrize('''default_cache_dir''' , [True, False] )
def __A ( _lowercase , _lowercase , _lowercase , _lowercase , _lowercase ):
'''simple docstring'''
_A = '''custom_cache'''
_A = '''custom_extracted_dir'''
_A = tmp_path / '''custom_extracted_path'''
if default_extracted:
_A = ('''downloads''' if default_cache_dir else custom_cache_dir, '''extracted''')
else:
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_DIR''' , _lowercase )
monkeypatch.setattr('''datasets.config.EXTRACTED_DATASETS_PATH''' , str(_lowercase ) )
_A = custom_extracted_path.parts[-2:] if default_cache_dir else (custom_cache_dir, custom_extracted_dir)
_A = xz_file
_A = (
DownloadConfig(extract_compressed_file=_lowercase )
if default_cache_dir
else DownloadConfig(cache_dir=tmp_path / custom_cache_dir , extract_compressed_file=_lowercase )
)
_A = cached_path(_lowercase , download_config=_lowercase )
assert Path(_lowercase ).parent.parts[-2:] == expected
def __A ( _lowercase ):
'''simple docstring'''
_A = str(Path(_lowercase ).resolve() )
assert cached_path(_lowercase ) == text_file
# relative path
_A = str(Path(_lowercase ).resolve().relative_to(Path(os.getcwd() ) ) )
assert cached_path(_lowercase ) == text_file
def __A ( _lowercase ):
'''simple docstring'''
_A = str(tmp_path.resolve() / '''__missing_file__.txt''' )
with pytest.raises(_lowercase ):
cached_path(_lowercase )
# relative path
_A = '''./__missing_file__.txt'''
with pytest.raises(_lowercase ):
cached_path(_lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = get_from_cache(f"""tmp://{tmpfs_file}""" )
with open(_lowercase ) as f:
_A = f.read()
assert output_file_content == FILE_CONTENT
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( ):
'''simple docstring'''
with pytest.raises(_lowercase ):
cached_path('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
http_get('''https://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
http_head('''https://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
ftp_get('''ftp://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
ftp_head('''ftp://huggingface.co''' )
@patch('''datasets.config.HF_DATASETS_OFFLINE''' , _lowercase )
def __A ( _lowercase ):
'''simple docstring'''
_A = tmp_path_factory.mktemp('''data''' ) / '''file.html'''
with pytest.raises(_lowercase ):
fsspec_get('''s3://huggingface.co''' , temp_file=_lowercase )
with pytest.raises(_lowercase ):
fsspec_head('''s3://huggingface.co''' )
| 62 | 1 |
'''simple docstring'''
import argparse
from copy import deepcopy
import numpy as np
from datasets import ClassLabel, DatasetDict, load_dataset
from evaluate import load
from transformers import (
AutoModelForSequenceClassification,
AutoTokenizer,
DataCollatorWithPadding,
Trainer,
TrainerCallback,
TrainingArguments,
set_seed,
)
def lowerCAmelCase_ ( ):
a__ = argparse.ArgumentParser()
parser.add_argument('--model_ckpt' , type=a , default='microsoft/unixcoder-base-nine' )
parser.add_argument('--num_epochs' , type=a , default=5 )
parser.add_argument('--batch_size' , type=a , default=6 )
parser.add_argument('--gradient_accumulation_steps' , type=a , default=1 )
parser.add_argument('--freeze' , type=a , default=a )
parser.add_argument('--learning_rate' , type=a , default=5e-4 )
parser.add_argument('--seed' , type=a , default=0 )
parser.add_argument('--lr_scheduler_type' , type=a , default='cosine' )
parser.add_argument('--num_warmup_steps' , type=a , default=10 )
parser.add_argument('--weight_decay' , type=a , default=0.01 )
parser.add_argument('--output_dir' , type=a , default='./results' )
return parser.parse_args()
__A : Dict = load('accuracy')
def lowerCAmelCase_ ( a : Dict ):
a__ , a__ = eval_pred
a__ = np.argmax(a , axis=1 )
return metric.compute(predictions=a , references=a )
class _UpperCamelCase ( _A ):
'''simple docstring'''
def __init__( self , _a ):
"""simple docstring"""
super().__init__()
a__ = trainer
def lowercase__ ( self , _a , _a , _a , **_a ):
"""simple docstring"""
if control.should_evaluate:
a__ = deepcopy(_a )
self._trainer.evaluate(eval_dataset=self._trainer.train_dataset , metric_key_prefix='train' )
return control_copy
def lowerCAmelCase_ ( ):
a__ = get_args()
set_seed(args.seed )
a__ = load_dataset('codeparrot/codecomplex' , split='train' )
a__ = dataset.train_test_split(test_size=0.2 )
a__ = train_test['test'].train_test_split(test_size=0.5 )
a__ = DatasetDict(
{
'train': train_test['train'],
'test': test_validation['train'],
'valid': test_validation['test'],
} )
print('Loading tokenizer and model' )
a__ = AutoTokenizer.from_pretrained(args.model_ckpt )
a__ = tokenizer.eos_token
a__ = AutoModelForSequenceClassification.from_pretrained(args.model_ckpt , num_labels=7 )
a__ = model.config.eos_token_id
if args.freeze:
for param in model.roberta.parameters():
a__ = False
a__ = ClassLabel(num_classes=7 , names=list(set(train_test_validation['train']['complexity'] ) ) )
def tokenize(a : Dict ):
a__ = tokenizer(example['src'] , truncation=a , max_length=1024 )
a__ = labels.str2int(example['complexity'] )
return {
"input_ids": inputs["input_ids"],
"attention_mask": inputs["attention_mask"],
"label": label,
}
a__ = train_test_validation.map(
a , batched=a , remove_columns=train_test_validation['train'].column_names , )
a__ = DataCollatorWithPadding(tokenizer=a )
a__ = TrainingArguments(
output_dir=args.output_dir , learning_rate=args.learning_rate , lr_scheduler_type=args.lr_scheduler_type , evaluation_strategy='epoch' , save_strategy='epoch' , logging_strategy='epoch' , per_device_train_batch_size=args.batch_size , per_device_eval_batch_size=args.batch_size , num_train_epochs=args.num_epochs , gradient_accumulation_steps=args.gradient_accumulation_steps , weight_decay=0.01 , metric_for_best_model='accuracy' , run_name='complexity-java' , report_to='wandb' , )
a__ = Trainer(
model=a , args=a , train_dataset=tokenized_datasets['train'] , eval_dataset=tokenized_datasets['valid'] , tokenizer=a , data_collator=a , compute_metrics=a , )
print('Training...' )
trainer.add_callback(CustomCallback(a ) )
trainer.train()
if __name__ == "__main__":
main()
| 394 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def lowerCAmelCase_ ( a : Optional[int] , a : Tuple=False ):
try:
a__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
a__ = default
else:
# KEY is set, convert it to True or False.
try:
a__ = strtobool(a )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
__A : Optional[int] = parse_flag_from_env('RUN_SLOW', default=False)
def lowerCAmelCase_ ( a : List[str] ):
return unittest.skip('Test was skipped' )(a )
def lowerCAmelCase_ ( a : Union[str, Any] ):
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(a )
def lowerCAmelCase_ ( a : Dict ):
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(a )
def lowerCAmelCase_ ( a : str ):
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(a )
def lowerCAmelCase_ ( a : Union[str, Any] ):
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(a )
def lowerCAmelCase_ ( a : Union[str, Any] ):
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(a )
def lowerCAmelCase_ ( a : Optional[int] ):
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(a )
def lowerCAmelCase_ ( a : Optional[Any] ):
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(a )
def lowerCAmelCase_ ( a : List[str] ):
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(a )
def lowerCAmelCase_ ( a : Tuple ):
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(a )
def lowerCAmelCase_ ( a : Dict ):
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(a )
def lowerCAmelCase_ ( a : List[str] ):
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(a )
def lowerCAmelCase_ ( a : Dict ):
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(a )
def lowerCAmelCase_ ( a : Tuple ):
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(a )
def lowerCAmelCase_ ( a : int ):
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(a )
def lowerCAmelCase_ ( a : Union[str, Any] ):
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(a )
def lowerCAmelCase_ ( a : int=None , a : Dict=None ):
if test_case is None:
return partial(a , version=a )
return unittest.skipUnless(is_torch_version('>=' , a ) , f'''test requires torch version >= {version}''' )(a )
def lowerCAmelCase_ ( a : Any ):
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(a )
def lowerCAmelCase_ ( a : str ):
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(a )
def lowerCAmelCase_ ( a : int ):
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(a )
__A : Optional[Any] = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def lowerCAmelCase_ ( a : int ):
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(a )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE:int = True
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
a__ = tempfile.mkdtemp()
@classmethod
def lowercase__ ( cls ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def lowercase__ ( self ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_a )
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def lowercase__ ( self , _a ):
"""simple docstring"""
a__ = mocks if isinstance(_a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def lowerCAmelCase_ ( a : List[str] ):
a__ = AcceleratorState()
a__ = tensor[None].clone().to(state.device )
a__ = gather(a ).cpu()
a__ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , a ):
return False
return True
class _UpperCamelCase :
'''simple docstring'''
def __init__( self , _a , _a , _a ):
"""simple docstring"""
a__ = returncode
a__ = stdout
a__ = stderr
async def lowerCAmelCase_ ( a : Any , a : int ):
while True:
a__ = await stream.readline()
if line:
callback(a )
else:
break
async def lowerCAmelCase_ ( a : int , a : Tuple=None , a : Optional[Any]=None , a : Tuple=None , a : str=False , a : Dict=False ):
if echo:
print('\nRunning: ' , ' '.join(a ) )
a__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=a , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=a , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
a__ = []
a__ = []
def tee(a : str , a : Optional[Any] , a : Any , a : Optional[int]="" ):
a__ = line.decode('utf-8' ).rstrip()
sink.append(a )
if not quiet:
print(a , a , file=a )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda a : tee(a , a , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda a : tee(a , a , sys.stderr , label='stderr:' ) ) ),
] , timeout=a , )
return _RunOutput(await p.wait() , a , a )
def lowerCAmelCase_ ( a : Union[str, Any] , a : str=None , a : Dict=None , a : List[Any]=180 , a : Optional[Any]=False , a : int=True ):
a__ = asyncio.get_event_loop()
a__ = loop.run_until_complete(
_stream_subprocess(a , env=a , stdin=a , timeout=a , quiet=a , echo=a ) )
a__ = ' '.join(a )
if result.returncode > 0:
a__ = '\n'.join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class _UpperCamelCase ( _A ):
'''simple docstring'''
pass
def lowerCAmelCase_ ( a : List[str] , a : Dict=False ):
try:
a__ = subprocess.check_output(a , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(a , 'decode' ):
a__ = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{' '.join(a )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
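# Minimal usage sketch (assumed; this helper is `run_command` in the upstream
# accelerate test utils): run_command(["echo", "ok"], return_stdout=True)
# would return "ok\n", while a non-zero exit status raises
# SubprocessCallException carrying the captured output.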
| 394 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "bridgetower_vision_model"
def __init__( self : Union[str, Any] , _A : Union[str, Any]=7_6_8 , _A : Tuple=1_2 , _A : List[Any]=3 , _A : Dict=1_6 , _A : str=2_8_8 , _A : Optional[Any]=1 , _A : Union[str, Any]=1e-05 , _A : Any=False , _A : int=True , _A : Optional[int]=False , **_A : int , ):
"""simple docstring"""
super().__init__(**_A)
_SCREAMING_SNAKE_CASE : int = hidden_size
_SCREAMING_SNAKE_CASE : int = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = num_channels
_SCREAMING_SNAKE_CASE : str = patch_size
_SCREAMING_SNAKE_CASE : int = image_size
_SCREAMING_SNAKE_CASE : Tuple = initializer_factor
_SCREAMING_SNAKE_CASE : str = layer_norm_eps
_SCREAMING_SNAKE_CASE : Optional[Any] = stop_gradient
_SCREAMING_SNAKE_CASE : Any = share_layernorm
_SCREAMING_SNAKE_CASE : int = remove_last_layer
@classmethod
def _lowerCAmelCase ( cls : Optional[int] , _A : Union[str, os.PathLike] , **_A : Union[str, Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Any = cls.get_config_dict(_A , **_A)
if config_dict.get("""model_type""") == "bridgetower":
_SCREAMING_SNAKE_CASE : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_A , **_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "bridgetower_text_model"
def __init__( self : Any , _A : int=5_0_2_6_5 , _A : Union[str, Any]=7_6_8 , _A : int=1_2 , _A : Tuple=1_2 , _A : Any=1 , _A : List[Any]=3_0_7_2 , _A : str="gelu" , _A : List[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : List[str]=5_1_4 , _A : Union[str, Any]=1 , _A : str=1e-05 , _A : Dict=1 , _A : Union[str, Any]=0 , _A : Any=2 , _A : Dict="absolute" , _A : Dict=True , **_A : Any , ):
"""simple docstring"""
super().__init__(**_A)
_SCREAMING_SNAKE_CASE : int = vocab_size
_SCREAMING_SNAKE_CASE : Union[str, Any] = hidden_size
_SCREAMING_SNAKE_CASE : Any = num_hidden_layers
_SCREAMING_SNAKE_CASE : Optional[int] = num_attention_heads
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : Optional[Any] = initializer_factor
_SCREAMING_SNAKE_CASE : str = intermediate_size
_SCREAMING_SNAKE_CASE : Any = hidden_dropout_prob
_SCREAMING_SNAKE_CASE : Optional[Any] = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE : List[Any] = max_position_embeddings
_SCREAMING_SNAKE_CASE : Optional[int] = type_vocab_size
_SCREAMING_SNAKE_CASE : Optional[int] = layer_norm_eps
_SCREAMING_SNAKE_CASE : Any = position_embedding_type
_SCREAMING_SNAKE_CASE : List[Any] = use_cache
_SCREAMING_SNAKE_CASE : List[Any] = pad_token_id
_SCREAMING_SNAKE_CASE : List[str] = bos_token_id
_SCREAMING_SNAKE_CASE : Union[str, Any] = eos_token_id
@classmethod
def _lowerCAmelCase ( cls : int , _A : Union[str, os.PathLike] , **_A : Tuple):
"""simple docstring"""
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE : Tuple = cls.get_config_dict(_A , **_A)
if config_dict.get("""model_type""") == "bridgetower":
_SCREAMING_SNAKE_CASE : Any = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""") and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""")
return cls.from_dict(_A , **_A)
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "bridgetower"
def __init__( self : List[str] , _A : str=True , _A : Tuple="gelu" , _A : Optional[Any]=7_6_8 , _A : Dict=1 , _A : Tuple=1e-05 , _A : Dict=False , _A : Tuple="add" , _A : Tuple=1_2 , _A : Any=6 , _A : Union[str, Any]=False , _A : Dict=False , _A : str=None , _A : Optional[Any]=None , **_A : Optional[int] , ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : str = kwargs.pop("""text_config_dict""" , _A)
_SCREAMING_SNAKE_CASE : List[str] = kwargs.pop("""vision_config_dict""" , _A)
super().__init__(**_A)
_SCREAMING_SNAKE_CASE : str = share_cross_modal_transformer_layers
_SCREAMING_SNAKE_CASE : int = hidden_act
_SCREAMING_SNAKE_CASE : Optional[int] = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = initializer_factor
_SCREAMING_SNAKE_CASE : str = layer_norm_eps
_SCREAMING_SNAKE_CASE : Dict = share_link_tower_layers
_SCREAMING_SNAKE_CASE : Union[str, Any] = link_tower_type
_SCREAMING_SNAKE_CASE : List[Any] = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = tie_word_embeddings
_SCREAMING_SNAKE_CASE : Any = init_layernorm_from_vision_encoder
if text_config is None:
_SCREAMING_SNAKE_CASE : Optional[Any] = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""")
if vision_config is None:
_SCREAMING_SNAKE_CASE : Tuple = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""")
_SCREAMING_SNAKE_CASE : Optional[Any] = BridgeTowerTextConfig(**_A)
_SCREAMING_SNAKE_CASE : Union[str, Any] = BridgeTowerVisionConfig(**_A)
@classmethod
def _lowerCAmelCase ( cls : str , _A : BridgeTowerTextConfig , _A : BridgeTowerVisionConfig , **_A : Union[str, Any]):
"""simple docstring"""
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_A)
def _lowerCAmelCase ( self : List[Any]):
"""simple docstring"""
_SCREAMING_SNAKE_CASE : List[Any] = copy.deepcopy(self.__dict__)
_SCREAMING_SNAKE_CASE : Tuple = self.text_config.to_dict()
_SCREAMING_SNAKE_CASE : Dict = self.vision_config.to_dict()
_SCREAMING_SNAKE_CASE : List[str] = self.__class__.model_type
return output
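# Minimal usage sketch (assumed upstream class names BridgeTowerTextConfig /
# BridgeTowerVisionConfig / BridgeTowerConfig, following the composite-config pattern):
# text_cfg = BridgeTowerTextConfig(hidden_size=7_6_8)
# vision_cfg = BridgeTowerVisionConfig(hidden_size=7_6_8)
# cfg = BridgeTowerConfig.from_text_vision_configs(text_cfg, vision_cfg)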
| 635 | """simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase_ = logging.get_logger(__name__)
lowerCAmelCase_ = {
'''asapp/sew-tiny-100k''': '''https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json''',
# See all SEW models at https://huggingface.co/models?filter=sew
}
class _snake_case ( __snake_case ):
"""simple docstring"""
a = "sew"
def __init__( self : List[Any] , _A : Tuple=3_2 , _A : str=7_6_8 , _A : Dict=1_2 , _A : Tuple=1_2 , _A : Optional[Any]=3_0_7_2 , _A : List[str]=2 , _A : Dict="gelu" , _A : Union[str, Any]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.1 , _A : Optional[int]=0.0 , _A : str=0.1 , _A : Tuple=0.1 , _A : Optional[int]=0.02 , _A : Dict=1e-5 , _A : str="group" , _A : Tuple="gelu" , _A : Union[str, Any]=(6_4, 1_2_8, 1_2_8, 1_2_8, 1_2_8, 2_5_6, 2_5_6, 2_5_6, 2_5_6, 5_1_2, 5_1_2, 5_1_2, 5_1_2) , _A : Optional[Any]=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) , _A : Any=(1_0, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) , _A : Tuple=False , _A : Tuple=1_2_8 , _A : int=1_6 , _A : Union[str, Any]=True , _A : Optional[Any]=0.05 , _A : List[Any]=1_0 , _A : Union[str, Any]=2 , _A : Tuple=0.0 , _A : Union[str, Any]=1_0 , _A : Optional[int]=0 , _A : Union[str, Any]="mean" , _A : Optional[int]=False , _A : List[Any]=False , _A : int=2_5_6 , _A : str=0 , _A : Optional[int]=1 , _A : List[Any]=2 , **_A : Dict , ):
"""simple docstring"""
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A)
_SCREAMING_SNAKE_CASE : str = hidden_size
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_norm
_SCREAMING_SNAKE_CASE : Optional[int] = feat_extract_activation
_SCREAMING_SNAKE_CASE : Dict = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : int = list(_A)
_SCREAMING_SNAKE_CASE : str = conv_bias
_SCREAMING_SNAKE_CASE : Tuple = num_conv_pos_embeddings
_SCREAMING_SNAKE_CASE : List[str] = num_conv_pos_embedding_groups
_SCREAMING_SNAKE_CASE : Tuple = len(self.conv_dim)
_SCREAMING_SNAKE_CASE : Tuple = num_hidden_layers
_SCREAMING_SNAKE_CASE : List[str] = intermediate_size
_SCREAMING_SNAKE_CASE : str = squeeze_factor
_SCREAMING_SNAKE_CASE : Dict = hidden_act
_SCREAMING_SNAKE_CASE : str = num_attention_heads
_SCREAMING_SNAKE_CASE : Dict = hidden_dropout
_SCREAMING_SNAKE_CASE : Tuple = attention_dropout
_SCREAMING_SNAKE_CASE : int = activation_dropout
_SCREAMING_SNAKE_CASE : Any = feat_proj_dropout
_SCREAMING_SNAKE_CASE : str = final_dropout
_SCREAMING_SNAKE_CASE : Union[str, Any] = layerdrop
_SCREAMING_SNAKE_CASE : Any = layer_norm_eps
_SCREAMING_SNAKE_CASE : int = initializer_range
_SCREAMING_SNAKE_CASE : List[Any] = vocab_size
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`.""")
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_SCREAMING_SNAKE_CASE : List[Any] = apply_spec_augment
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_prob
_SCREAMING_SNAKE_CASE : List[str] = mask_time_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_time_min_masks
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_prob
_SCREAMING_SNAKE_CASE : int = mask_feature_length
_SCREAMING_SNAKE_CASE : List[Any] = mask_feature_min_masks
# ctc loss
_SCREAMING_SNAKE_CASE : int = ctc_loss_reduction
_SCREAMING_SNAKE_CASE : Optional[int] = ctc_zero_infinity
# sequence classification
_SCREAMING_SNAKE_CASE : Dict = use_weighted_layer_sum
_SCREAMING_SNAKE_CASE : List[str] = classifier_proj_size
@property
def _lowerCAmelCase ( self : Any):
"""simple docstring"""
return functools.reduce(operator.mul , self.conv_stride , 1)
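# Worked check of the property above (inputs-to-logits ratio): with the default
# conv_stride (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
# functools.reduce(operator.mul, conv_stride, 1) = 5 * 2**6 = 320, i.e. roughly
# 320 raw audio samples feed each output frame.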
| 635 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import torch
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .modeling_utils import ModelMixin
@dataclass
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_: torch.FloatTensor
class _SCREAMING_SNAKE_CASE ( UpperCAmelCase , UpperCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : Optional[Any] , UpperCAmelCase_ : int = 16 , UpperCAmelCase_ : int = 88 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : int = 1 , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : int = 32 , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : bool = False , UpperCAmelCase_ : Optional[int] = None , UpperCAmelCase_ : str = "geglu" , UpperCAmelCase_ : bool = True , UpperCAmelCase_ : bool = True , ) -> str:
"""simple docstring"""
super().__init__()
_lowerCAmelCase = num_attention_heads
_lowerCAmelCase = attention_head_dim
_lowerCAmelCase = num_attention_heads * attention_head_dim
_lowerCAmelCase = in_channels
_lowerCAmelCase = torch.nn.GroupNorm(num_groups=UpperCAmelCase_ , num_channels=UpperCAmelCase_ , eps=1E-6 , affine=UpperCAmelCase_ )
_lowerCAmelCase = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
# 3. Define transformers blocks
_lowerCAmelCase = nn.ModuleList(
[
BasicTransformerBlock(
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , dropout=UpperCAmelCase_ , cross_attention_dim=UpperCAmelCase_ , activation_fn=UpperCAmelCase_ , attention_bias=UpperCAmelCase_ , double_self_attention=UpperCAmelCase_ , norm_elementwise_affine=UpperCAmelCase_ , )
for d in range(UpperCAmelCase_ )
] )
_lowerCAmelCase = nn.Linear(UpperCAmelCase_ , UpperCAmelCase_ )
def __lowerCamelCase ( self : Tuple , UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : int=None , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : List[Any]=None , UpperCAmelCase_ : Dict=1 , UpperCAmelCase_ : Optional[Any]=None , UpperCAmelCase_ : bool = True , ) -> List[str]:
"""simple docstring"""
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase = hidden_states.shape
_lowerCAmelCase = batch_frames // num_frames
_lowerCAmelCase = hidden_states
_lowerCAmelCase = hidden_states[None, :].reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = hidden_states.permute(0 , 2 , 1 , 3 , 4 )
_lowerCAmelCase = self.norm(UpperCAmelCase_ )
_lowerCAmelCase = hidden_states.permute(0 , 3 , 4 , 2 , 1 ).reshape(batch_size * height * width , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = self.proj_in(UpperCAmelCase_ )
# 2. Blocks
for block in self.transformer_blocks:
_lowerCAmelCase = block(
UpperCAmelCase_ , encoder_hidden_states=UpperCAmelCase_ , timestep=UpperCAmelCase_ , cross_attention_kwargs=UpperCAmelCase_ , class_labels=UpperCAmelCase_ , )
# 3. Output
_lowerCAmelCase = self.proj_out(UpperCAmelCase_ )
_lowerCAmelCase = (
hidden_states[None, None, :]
.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
.permute(0 , 3 , 4 , 1 , 2 )
.contiguous()
)
_lowerCAmelCase = hidden_states.reshape(UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ )
_lowerCAmelCase = hidden_states + residual
if not return_dict:
return (output,)
return TransformerTemporalModelOutput(sample=UpperCAmelCase_ )
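# Shape sketch for the forward pass above (illustrative numbers): hidden states of
# shape (batch * num_frames, channels, height, width), e.g. (2 * 8, 3_2_0, 3_2, 3_2)
# with num_frames=8, are reshaped so attention runs over the frame axis, and the
# output sample comes back in the same (1_6, 3_2_0, 3_2, 3_2) layout.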
| 580 |
"""simple docstring"""
import numpy as np
from cv2 import COLOR_BGR2GRAY, CV_8UC3, cvtColor, filter2D, imread, imshow, waitKey
def __snake_case ( SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int , SCREAMING_SNAKE_CASE: int ):
"""simple docstring"""
if (ksize % 2) == 0:
_lowerCAmelCase = ksize + 1
_lowerCAmelCase = np.zeros((ksize, ksize) , dtype=np.floataa )
# each value
for y in range(SCREAMING_SNAKE_CASE ):
for x in range(SCREAMING_SNAKE_CASE ):
# distance from center
_lowerCAmelCase = x - ksize // 2
_lowerCAmelCase = y - ksize // 2
# degrees to radians
_lowerCAmelCase = theta / 180 * np.pi
_lowerCAmelCase = np.cos(_theta )
_lowerCAmelCase = np.sin(_theta )
# get kernel x
_lowerCAmelCase = cos_theta * px + sin_theta * py
# get kernel y
_lowerCAmelCase = -sin_theta * px + cos_theta * py
# fill kernel
_lowerCAmelCase = np.exp(
-(_x**2 + gamma**2 * _y**2) / (2 * sigma**2) ) * np.cos(2 * np.pi * _x / lambd + psi )
return gabor
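# Illustrative call (values as used below): gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
# is intended to pad the even ksize to 11 and return an odd-sized float32 Gabor
# kernel that can then be convolved with a grayscale image via filter2D.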
if __name__ == "__main__":
import doctest
doctest.testmod()
# read original image
_snake_case = imread('''../image_data/lena.jpg''')
# turn image in gray scale value
_snake_case = cvtColor(img, COLOR_BGR2GRAY)
# Apply multiple Kernel to detect edges
_snake_case = np.zeros(gray.shape[:2])
for theta in [0, 3_0, 6_0, 9_0, 1_2_0, 1_5_0]:
_snake_case = gabor_filter_kernel(1_0, 8, theta, 1_0, 0, 0)
out += filter2D(gray, CV_8UC3, kernel_10)
_snake_case = out / out.max() * 2_5_5
_snake_case = out.astype(np.uint8)
imshow('''Original''', gray)
imshow('''Gabor filter with 10x10 mask and 6 directions''', out)
waitKey(0)
| 580 | 1 |
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = 256
# Modulus to hash a string
lowerCAmelCase_ : List[str] = 1000003
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
_UpperCAmelCase : Dict = len(lowerCAmelCase_ )
_UpperCAmelCase : Union[str, Any] = len(lowerCAmelCase_ )
if p_len > t_len:
return False
_UpperCAmelCase : Any = 0
_UpperCAmelCase : List[Any] = 0
_UpperCAmelCase : Any = 1
# Calculating the hash of pattern and substring of text
for i in range(lowerCAmelCase_ ):
_UpperCAmelCase : Optional[Any] = (ord(pattern[i] ) + p_hash * alphabet_size) % modulus
_UpperCAmelCase : Optional[Any] = (ord(text[i] ) + text_hash * alphabet_size) % modulus
if i == p_len - 1:
continue
_UpperCAmelCase : List[str] = (modulus_power * alphabet_size) % modulus
for i in range(0 , t_len - p_len + 1 ):
if text_hash == p_hash and text[i : i + p_len] == pattern:
return True
if i == t_len - p_len:
continue
# Calculate the rolling hash (https://en.wikipedia.org/wiki/Rolling_hash)
_UpperCAmelCase : List[str] = (
(text_hash - ord(text[i] ) * modulus_power) * alphabet_size
+ ord(text[i + p_len] )
) % modulus
return False
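# Worked sketch of the rolling-hash update above (illustrative, alphabet_size=256,
# pattern length 2 so modulus_power ends up as 256): hash("ab") = ord("a") * 256
# + ord("b"); sliding one position to "bc" computes
# (hash("ab") - ord("a") * 256) * 256 + ord("c"), all taken modulo `modulus`.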
def __A ( ):
_UpperCAmelCase : Any = """abc1abc12"""
_UpperCAmelCase : Optional[int] = """alskfjaldsabc1abc1abc12k23adsfabcabc"""
_UpperCAmelCase : Optional[int] = """alskfjaldsk23adsfabcabc"""
assert rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ ) and not rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
# Test 2)
_UpperCAmelCase : Any = """ABABX"""
_UpperCAmelCase : Optional[int] = """ABABZABABYABABX"""
assert rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
# Test 3)
_UpperCAmelCase : int = """AAAB"""
_UpperCAmelCase : List[str] = """ABAAAAAB"""
assert rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
# Test 4)
_UpperCAmelCase : Dict = """abcdabcy"""
_UpperCAmelCase : List[Any] = """abcxabcdabxabcdabcdabcy"""
assert rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
# Test 5)
_UpperCAmelCase : List[Any] = """Lü"""
_UpperCAmelCase : List[Any] = """Lüsai"""
assert rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase : List[str] = """Lue"""
assert not rabin_karp(lowerCAmelCase_ , lowerCAmelCase_ )
print("""Success.""" )
if __name__ == "__main__":
test_rabin_karp()
| 156 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import numpy as np
from utils_multiple_choice import MultipleChoiceDataset, Split, processors
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
lowerCAmelCase_ : Dict = logging.getLogger(__name__)
def __A ( lowerCAmelCase_ , lowerCAmelCase_ ):
return (preds == labels).mean()
@dataclass
class __lowerCAmelCase :
snake_case : str = field(
metadata={"""help""": """Path to pretrained model or model identifier from huggingface.co/models"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained config name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Pretrained tokenizer name or path if not the same as model_name"""} )
snake_case : Optional[str] = field(
default=__a , metadata={"""help""": """Where do you want to store the pretrained models downloaded from huggingface.co"""} , )
@dataclass
class __lowerCAmelCase :
snake_case : str = field(metadata={"""help""": """The name of the task to train on: """ + """, """.join(processors.keys() )} )
snake_case : str = field(metadata={"""help""": """Should contain the data files for the task."""} )
snake_case : int = field(
default=1_2_8 , metadata={
"""help""": (
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
)
} , )
snake_case : bool = field(
default=__a , metadata={"""help""": """Overwrite the cached training and evaluation sets"""} )
def __A ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
_UpperCAmelCase : Tuple = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[str] = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"Output directory ({training_args.output_dir}) already exists and is not empty. Use"
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , lowerCAmelCase_ )
# Set seed
set_seed(training_args.seed )
try:
_UpperCAmelCase : Union[str, Any] = processors[data_args.task_name]()
_UpperCAmelCase : int = processor.get_labels()
_UpperCAmelCase : Optional[int] = len(lowerCAmelCase_ )
except KeyError:
raise ValueError("""Task not found: %s""" % (data_args.task_name) )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        finetuning_task=data_args.task_name,
        cache_dir=model_args.cache_dir,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
    )
    model = AutoModelForMultipleChoice.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
    )
# Get datasets
    train_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.train,
        )
        if training_args.do_train
        else None
    )
    eval_dataset = (
        MultipleChoiceDataset(
            data_dir=data_args.data_dir,
            tokenizer=tokenizer,
            task=data_args.task_name,
            max_seq_length=data_args.max_seq_length,
            overwrite_cache=data_args.overwrite_cache,
            mode=Split.dev,
        )
        if training_args.do_eval
        else None
    )
    def compute_metrics(p: EvalPrediction) -> Dict:
        preds = np.argmax(p.predictions, axis=1)
        return {"acc": simple_accuracy(preds, p.label_ids)}
# Data collator
    data_collator = DataCollatorWithPadding(tokenizer, pad_to_multiple_of=8) if training_args.fp16 else None
# Initialize our Trainer
    trainer = Trainer(
        model=model,
        args=training_args,
        train_dataset=train_dataset,
        eval_dataset=eval_dataset,
        compute_metrics=compute_metrics,
        data_collator=data_collator,
    )
# Training
if training_args.do_train:
trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
    results = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
        result = trainer.evaluate()
        output_eval_file = os.path.join(training_args.output_dir, "eval_results.txt")
        if trainer.is_world_master():
            with open(output_eval_file, "w") as writer:
                logger.info("***** Eval results *****")
                for key, value in result.items():
                    logger.info("  %s = %s", key, value)
                    writer.write("%s = %s\n" % (key, value))
            results.update(result)
return results
def _mp_fn(index):
    # For xla_spawn (TPUs)
    main()
if __name__ == "__main__":
main()
| 156 | 1 |
def sum_of_digits(n: int) -> int:
    """Return the sum of the decimal digits of n, iteratively."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res
def sum_of_digits_recursion(n: int) -> int:
    """Return the sum of the decimal digits of n, recursively."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)
def sum_of_digits_compact(n: int) -> int:
    """Return the sum of the decimal digits of n via its string form."""
    return sum(int(c) for c in str(abs(n)))
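# Worked example: all three implementations agree, e.g.
# sum_of_digits(9045) == sum_of_digits_recursion(9045) == sum_of_digits_compact(9045) == 18.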
def benchmark() -> None:
    """Time each implementation on a few increasingly large inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 22 |
"""simple docstring"""
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ASTConfig
from transformers.testing_utils import require_torch, require_torchaudio, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_torchaudio_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ASTForAudioClassification, ASTModel
from transformers.models.audio_spectrogram_transformer.modeling_audio_spectrogram_transformer import (
AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_torchaudio_available():
import torchaudio
from transformers import ASTFeatureExtractor
class ASTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        patch_size=2,
        max_length=24,
        num_mel_bins=16,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        frequency_stride=2,
        time_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.patch_size = patch_size
        self.max_length = max_length
        self.num_mel_bins = num_mel_bins
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.frequency_stride = frequency_stride
        self.time_stride = time_stride
        # in AST, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        frequency_out_dimension = (self.num_mel_bins - self.patch_size) // self.frequency_stride + 1
        time_out_dimension = (self.max_length - self.patch_size) // self.time_stride + 1
        num_patches = frequency_out_dimension * time_out_dimension
        self.seq_length = num_patches + 2
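        # With the defaults above: frequency_out_dimension = (16 - 2) // 2 + 1 = 8,
        # time_out_dimension = (24 - 2) // 2 + 1 = 12, so self.seq_length = 8 * 12 + 2 = 98.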
    def prepare_config_and_inputs(self):
        input_values = floats_tensor([self.batch_size, self.max_length, self.num_mel_bins])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, input_values, labels

    def get_config(self):
        return ASTConfig(
            patch_size=self.patch_size,
            max_length=self.max_length,
            num_mel_bins=self.num_mel_bins,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            initializer_range=self.initializer_range,
            frequency_stride=self.frequency_stride,
            time_stride=self.time_stride,
        )

    def create_and_check_model(self, config, input_values, labels):
        model = ASTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_values, labels = config_and_inputs
        inputs_dict = {"input_values": input_values}
        return config, inputs_dict
@require_torch
class ASTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            ASTModel,
            ASTForAudioClassification,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"audio-classification": ASTForAudioClassification, "feature-extraction": ASTModel}
        if is_torch_available()
        else {}
    )
    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
        if pipeline_test_casse_name == "AudioClassificationPipelineTests":
            return True
        return False

    def setUp(self):
        self.model_tester = ASTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ASTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="AST does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["input_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)
    @slow
    def test_model_from_pretrained(self):
        for model_name in AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ASTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_audio():
    filepath = hf_hub_download(
        repo_id="nielsr/audio-spectogram-transformer-checkpoint", filename="sample_audio.flac", repo_type="dataset"
    )
    audio, sampling_rate = torchaudio.load(filepath)
    return audio, sampling_rate
@require_torch
@require_torchaudio
class ASTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_feature_extractor(self):
        return (
            ASTFeatureExtractor.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593")
            if is_torchaudio_available()
            else None
        )
    @slow
    def test_inference_audio_classification(self):
        feature_extractor = self.default_feature_extractor
        model = ASTForAudioClassification.from_pretrained("MIT/ast-finetuned-audioset-10-10-0.4593").to(torch_device)
        audio, sampling_rate = prepare_audio()
        audio = audio.squeeze().numpy()
        inputs = feature_extractor(audio, sampling_rate=sampling_rate, return_tensors="pt").to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 527))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 102 | 0 |
import os
import re
import warnings
from shutil import copyfile
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
if TYPE_CHECKING:
from ...tokenization_utils_base import TextInput
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "spiece.model"}

PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''t5-small''': '''https://huggingface.co/t5-small/resolve/main/spiece.model''',
'''t5-base''': '''https://huggingface.co/t5-base/resolve/main/spiece.model''',
'''t5-large''': '''https://huggingface.co/t5-large/resolve/main/spiece.model''',
'''t5-3b''': '''https://huggingface.co/t5-3b/resolve/main/spiece.model''',
'''t5-11b''': '''https://huggingface.co/t5-11b/resolve/main/spiece.model''',
}
}
# TODO(PVP) - this should be removed in Transformers v5
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''t5-small''': 512,
'''t5-base''': 512,
'''t5-large''': 512,
'''t5-3b''': 512,
'''t5-11b''': 512,
}
SPIECE_UNDERLINE = "▁"
class TaTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        pad_token="<pad>",
        extra_ids=100,
        additional_special_tokens=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        legacy=True,
        **kwargs,
    ):
        # Add extra_ids to the special token list
        if extra_ids > 0 and additional_special_tokens is None:
            additional_special_tokens = [f"<extra_id_{i}>" for i in range(extra_ids)]
        elif extra_ids > 0 and additional_special_tokens is not None:
            # Check that we have the right number of extra_id special tokens
            extra_tokens = len(set(filter(lambda x: bool("extra_id" in str(x)), additional_special_tokens)))
            if extra_tokens != extra_ids:
                raise ValueError(
                    f"Both extra_ids ({extra_ids}) and additional_special_tokens ({additional_special_tokens}) are"
                    " provided to T5Tokenizer. In this case the additional_special_tokens must include the extra_ids"
                    " tokens"
                )
        if legacy:
            logger.warning_once(
                f"You are using the legacy behaviour of the {self.__class__}. This means that tokens that come after special tokens will not be properly handled. We recommend you to"
                " read the related pull request available at https://github.com/huggingface/transformers/pull/24565"
            )
        self.legacy = legacy
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            extra_ids=extra_ids,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            legacy=legacy,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self._extra_ids = extra_ids
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @staticmethod
    def _eventually_correct_t5_max_length(pretrained_model_name_or_path, max_model_length, init_max_model_length):
        if pretrained_model_name_or_path in TaTokenizer.max_model_input_sizes:
            deprecated_max_model_length = TaTokenizer.max_model_input_sizes[pretrained_model_name_or_path]
            if init_max_model_length is not None and init_max_model_length != max_model_length:
                return init_max_model_length
            elif init_max_model_length is None:
                warnings.warn(
                    "This tokenizer was incorrectly instantiated with a model max length of"
                    f" {deprecated_max_model_length} which will be corrected in Transformers v5.\nFor now, this"
                    " behavior is kept to avoid breaking backwards compatibility when padding/encoding with"
                    " `truncation is True`.\n- Be aware that you SHOULD NOT rely on"
                    f" {pretrained_model_name_or_path} automatically truncating your input to"
                    f" {deprecated_max_model_length} when padding/encoding.\n- If you want to encode/pad to sequences"
                    f" longer than {deprecated_max_model_length} you can either instantiate this tokenizer with"
                    " `model_max_length` or pass `max_length` when encoding/padding.\n- To avoid this warning, please"
                    " instantiate this tokenizer with `model_max_length` set to your preferred value.",
                    FutureWarning,
                )
        return max_model_length
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size() + self._extra_ids

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        # normal case: some special tokens
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + [1]
        return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def get_sentinel_tokens(self):
        return list(
            set(filter(lambda token: bool(re.search(r"<extra_id_\d+>", token)) is not None, self.additional_special_tokens))
        )

    def get_sentinel_token_ids(self):
        return [self._convert_token_to_id(token) for token in self.get_sentinel_tokens()]
    def _add_eos_if_not_present(self, token_ids: List[int]) -> List[int]:
        """Do not add eos again if the user already added it."""
        if len(token_ids) > 0 and token_ids[-1] == self.eos_token_id:
            warnings.warn(
                f"This sequence already has {self.eos_token}. In future versions this behavior may lead to duplicated"
                " eos tokens being added."
            )
            return token_ids
        else:
            return token_ids + [self.eos_token_id]

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        eos = [self.eos_token_id]
        if token_ids_1 is None:
            return len(token_ids_0 + eos) * [0]
        return len(token_ids_0 + eos + token_ids_1 + eos) * [0]
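    # T5 does not use token type ids, so the mask above is all zeros: for a single
    # sequence of n ids the result is (n + 1) * [0], the +1 accounting for </s>.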
    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        token_ids_0 = self._add_eos_if_not_present(token_ids_0)
        if token_ids_1 is None:
            return token_ids_0
        else:
            token_ids_1 = self._add_eos_if_not_present(token_ids_1)
            return token_ids_0 + token_ids_1
    def __getstate__(self):
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, "sp_model_kwargs"):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
def _UpperCAmelCase ( self : Any , _lowercase : "TextInput" , **_lowercase : Dict ):
"""simple docstring"""
if not self.legacy:
UpperCAmelCase__ = SPIECE_UNDERLINE + text.replace(__lowerCAmelCase , " " )
return super().tokenize(__lowerCAmelCase , **__lowerCAmelCase )
    def _tokenize(self, text, **kwargs):
        if not self.legacy:
            is_first = text.startswith(SPIECE_UNDERLINE)
            if is_first:
                text = text[1:]
        tokens = self.sp_model.encode(text, out_type=str)
        if not self.legacy and not is_first and not text.startswith(" ") and tokens[0].startswith(SPIECE_UNDERLINE):
            tokens = ([tokens[0][1:]] if len(tokens[0]) > 1 else []) + tokens[1:]
        return tokens

    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        if token.startswith("<extra_id_"):
            match = re.match(r"<extra_id_(\d+)>", token)
            num = int(match.group(1))
            return self.vocab_size - num - 1
        return self.sp_model.piece_to_id(token)
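    # Sentinel ids count down from the end of the vocabulary: with the default 100
    # extra ids and a 32100-token vocab, "<extra_id_0>" -> 32099, "<extra_id_1>" -> 32098.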
    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        else:
            token = f"<extra_id_{self.vocab_size - 1 - index}>"
        return token
    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (string) to a single string."""
        current_sub_tokens = []
        out_string = ""
        prev_is_special = False
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                if not prev_is_special:
                    out_string += " "
                out_string += self.sp_model.decode(current_sub_tokens) + token
                prev_is_special = True
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
                prev_is_special = False
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, "wb") as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 710 |
from __future__ import annotations
class Matrix:
    """A matrix backed by a list of rows of ints/floats."""

    def __init__(self, rows: list[list[int]]):
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []
    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]
    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )
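    # Worked example: Matrix([[1, 2], [3, 4]]).determinant() == 1 * 4 - 2 * 3 == -2.
    # Larger matrices recurse through cofactors() via Laplace expansion along row 0.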
    def is_invertable(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )
    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)
    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )
    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError("Row must be equal in length to the other rows in the matrix")
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError("Column must be a list containing all ints and/or floats")
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError("Column must be equal in length to the other columns in the matrix")
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]
    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )
    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError("A Matrix can only be multiplied by an int, float, or another matrix")

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertable():
                return self.inverse() ** (-other)
            raise ValueError("Only invertable matrices can be raised to a negative power")
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
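# Minimal usage sketch (values chosen for illustration):
#   m = Matrix([[1, 2], [3, 4]])
#   m.determinant()   # -2
#   (m * m).rows      # [[7, 10], [15, 22]]
#   (m ** 2) == m * m # True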
if __name__ == "__main__":
import doctest
doctest.testmod()
| 277 | 0 |
import itertools
from dataclasses import dataclass
from typing import Any, Callable, Dict, List, Optional, Union
import pandas as pd
import pyarrow as pa
import datasets
import datasets.config
from datasets.features.features import require_storage_cast
from datasets.table import table_cast
from datasets.utils.py_utils import Literal
logger = datasets.utils.logging.get_logger(__name__)

_PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS = ["names", "prefix"]
_PANDAS_READ_CSV_DEPRECATED_PARAMETERS = ["warn_bad_lines", "error_bad_lines", "mangle_dupe_cols"]
_PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS = ["encoding_errors", "on_bad_lines"]
_PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS = ["date_format"]
@dataclass
class CsvConfig(datasets.BuilderConfig):
    """BuilderConfig for CSV."""

    sep: str = ","
    delimiter: Optional[str] = None
    header: Optional[Union[int, List[int], str]] = "infer"
    names: Optional[List[str]] = None
    column_names: Optional[List[str]] = None
    index_col: Optional[Union[int, str, List[int], List[str]]] = None
    usecols: Optional[Union[List[int], List[str]]] = None
    prefix: Optional[str] = None
    mangle_dupe_cols: bool = True
    engine: Optional[str] = None
    converters: Optional[Dict[Union[int, str], Callable[[Any], Any]]] = None
    true_values: Optional[list] = None
    false_values: Optional[list] = None
    skipinitialspace: bool = False
    skiprows: Optional[Union[int, List[int]]] = None
    nrows: Optional[int] = None
    na_values: Optional[Union[str, List[str]]] = None
    keep_default_na: bool = True
    na_filter: bool = True
    verbose: bool = False
    skip_blank_lines: bool = True
    thousands: Optional[str] = None
    decimal: str = "."
    lineterminator: Optional[str] = None
    quotechar: str = '"'
    quoting: int = 0
    escapechar: Optional[str] = None
    comment: Optional[str] = None
    encoding: Optional[str] = None
    dialect: Optional[str] = None
    error_bad_lines: bool = True
    warn_bad_lines: bool = True
    skipfooter: int = 0
    doublequote: bool = True
    memory_map: bool = False
    float_precision: Optional[str] = None
    chunksize: int = 10_000
    features: Optional[datasets.Features] = None
    encoding_errors: Optional[str] = "strict"
    on_bad_lines: Literal["error", "warn", "skip"] = "error"
    date_format: Optional[str] = None
    def __post_init__(self):
        if self.delimiter is not None:
            self.sep = self.delimiter
        if self.column_names is not None:
            self.names = self.column_names
    @property
    def pd_read_csv_kwargs(self):
        pd_read_csv_kwargs = {
"""sep""": self.sep,
"""header""": self.header,
"""names""": self.names,
"""index_col""": self.index_col,
"""usecols""": self.usecols,
"""prefix""": self.prefix,
"""mangle_dupe_cols""": self.mangle_dupe_cols,
"""engine""": self.engine,
"""converters""": self.converters,
"""true_values""": self.true_values,
"""false_values""": self.false_values,
"""skipinitialspace""": self.skipinitialspace,
"""skiprows""": self.skiprows,
"""nrows""": self.nrows,
"""na_values""": self.na_values,
"""keep_default_na""": self.keep_default_na,
"""na_filter""": self.na_filter,
"""verbose""": self.verbose,
"""skip_blank_lines""": self.skip_blank_lines,
"""thousands""": self.thousands,
"""decimal""": self.decimal,
"""lineterminator""": self.lineterminator,
"""quotechar""": self.quotechar,
"""quoting""": self.quoting,
"""escapechar""": self.escapechar,
"""comment""": self.comment,
"""encoding""": self.encoding,
"""dialect""": self.dialect,
"""error_bad_lines""": self.error_bad_lines,
"""warn_bad_lines""": self.warn_bad_lines,
"""skipfooter""": self.skipfooter,
"""doublequote""": self.doublequote,
"""memory_map""": self.memory_map,
"""float_precision""": self.float_precision,
"""chunksize""": self.chunksize,
"""encoding_errors""": self.encoding_errors,
"""on_bad_lines""": self.on_bad_lines,
"""date_format""": self.date_format,
}
# some kwargs must not be passed if they don't have a default value
# some others are deprecated and we can also not pass them if they are the default value
for pd_read_csv_parameter in _PANDAS_READ_CSV_NO_DEFAULT_PARAMETERS + _PANDAS_READ_CSV_DEPRECATED_PARAMETERS:
            if pd_read_csv_kwargs[pd_read_csv_parameter] == getattr(CsvConfig(), pd_read_csv_parameter):
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 2.0 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 2):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_2_0_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
# Remove 1.3 new arguments
if not (datasets.config.PANDAS_VERSION.major >= 1 and datasets.config.PANDAS_VERSION.minor >= 3):
for pd_read_csv_parameter in _PANDAS_READ_CSV_NEW_1_3_0_PARAMETERS:
del pd_read_csv_kwargs[pd_read_csv_parameter]
return pd_read_csv_kwargs
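    # For example, CsvConfig(delimiter=";").pd_read_csv_kwargs yields pandas kwargs
    # with sep=";" (since __post_init__ above copies `delimiter` into `sep`) and drops
    # deprecated/no-default parameters that were left at their defaults.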
class Csv(datasets.ArrowBasedBuilder):
    BUILDER_CONFIG_CLASS = CsvConfig

    def _info(self):
        return datasets.DatasetInfo(features=self.config.features)
    def _split_generators(self, dl_manager):
        """We handle string, list and dicts in datafiles."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"files": files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={"files": files}))
        return splits
    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        if self.config.features is not None:
            schema = self.config.features.arrow_schema
            if all(not require_storage_cast(feature) for feature in self.config.features.values()):
                # cheaper cast
                pa_table = pa.Table.from_arrays([pa_table[field.name] for field in schema], schema=schema)
            else:
                # more expensive cast; allows str <-> int/float or str to Audio for example
                pa_table = table_cast(pa_table, schema)
        return pa_table
    def _generate_tables(self, files):
        schema = self.config.features.arrow_schema if self.config.features else None
        # dtype allows reading an int column as str
        dtype = (
            {
                name: dtype.to_pandas_dtype() if not require_storage_cast(feature) else object
                for name, dtype, feature in zip(schema.names, schema.types, self.config.features.values())
            }
            if schema is not None
            else None
        )
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            csv_file_reader = pd.read_csv(file, iterator=True, dtype=dtype, **self.config.pd_read_csv_kwargs)
            try:
                for batch_idx, df in enumerate(csv_file_reader):
                    pa_table = pa.Table.from_pandas(df)
                    # Uncomment for debugging (will print the Arrow table size and elements)
                    # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                    # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                    yield (file_idx, batch_idx), self._cast_table(pa_table)
            except ValueError as e:
                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                raise
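# Typical entry point (file name illustrative): datasets.load_dataset("csv",
# data_files="train.csv") routes through Csv._split_generators and then
# _generate_tables above, yielding one Arrow table per pandas chunk of `chunksize` rows.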
| 268 |
"""simple docstring"""
import unittest
from transformers import SPIECE_UNDERLINE, XLNetTokenizer, XLNetTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class XLNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = XLNetTokenizer
    rust_tokenizer_class = XLNetTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()
        # We have a SentencePiece fixture for testing
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.sanitize_special_tokens()
        tokenizer.save_pretrained(self.tmpdirname)
    def test_convert_token_and_id(self):
        """Test ``_convert_token_to_id`` and ``_convert_id_to_token``."""
        token = "<s>"
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
        self.assertEqual(vocab_keys[0], "<unk>")
        self.assertEqual(vocab_keys[1], "<s>")
        self.assertEqual(vocab_keys[-1], "<eod>")
        self.assertEqual(len(vocab_keys), 1006)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1000)
    def test_full_tokenizer(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokens = tokenizer.tokenize("This is a test")
        self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [285, 46, 10, 170, 382])
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
        ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(ids, [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4])
        back_tokens = tokenizer.convert_ids_to_tokens(ids)
        self.assertListEqual(
            back_tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
    def test_tokenizer_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=True)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "",
"i",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
self.assertListEqual(tokenizer.tokenize("H\u00E9llo" ) , ["▁he", "ll", "o"] )
    def test_tokenizer_no_lower(self):
        tokenizer = XLNetTokenizer(SAMPLE_VOCAB, do_lower_case=False)
        tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
        self.assertListEqual(
            tokens,
            [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"se",
".",
] , )
    @slow
    def test_sequence_builders(self):
        tokenizer = XLNetTokenizer.from_pretrained("xlnet-base-cased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)
        assert encoded_sentence == text + [4, 3]
        assert encoded_pair == text + [4] + text_2 + [4, 3]
    @slow
    def test_tokenizer_integration(self):
        # fmt: off
_UpperCAmelCase = {"input_ids": [[17, 21442, 270, 17, 10, 14645, 318, 34, 17, 4546, 3145, 787, 13, 7752, 22018, 23, 21, 17, 4546, 3145, 787, 13, 3352, 14431, 13, 5500, 11, 1176, 580, 13, 16819, 4797, 23, 17, 10, 17135, 658, 19, 457, 7932, 13, 184, 19, 3154, 17135, 6468, 19, 1404, 12269, 19, 4229, 5356, 16264, 46, 19, 17, 20545, 10395, 9, 9, 9, 11, 28, 6421, 9531, 20729, 17, 10, 353, 17022, 11, 21, 6421, 9531, 16949, 17, 10, 11509, 753, 11, 33, 95, 2421, 7385, 956, 14431, 2626, 25, 842, 7385, 4836, 21, 1429, 2272, 9855, 3120, 161, 24738, 19, 13203, 658, 218, 787, 21, 430, 18482, 847, 2637, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 322, 22178, 27, 1064, 22, 956, 13, 11101, 1429, 5854, 24313, 18953, 40, 422, 24366, 68, 1758, 37, 10483, 14257, 31, 207, 263, 21, 203, 3773, 25, 71, 9735, 9, 4, 3], [5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 32, 2049, 3442, 17, 13894, 3380, 23, 95, 18, 17634, 2288, 9, 4, 3]], "token_type_ids": [[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2], [3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        self.tokenizer_integration_test_util(
            expected_encoding=_UpperCAmelCase,  # the expected-encoding dict defined above
            model_name="xlnet-base-cased",
            revision="c841166438c31ec7ca9a106dee7bb312b73ae511",
        )
| 426 | 0 |
def longest_common_substring(text1: str, text2: str) -> str:
    """Return the longest substring common to text1 and text2, via dynamic programming."""
    if not (isinstance(text1, str) and isinstance(text2, str)):
        raise ValueError("longest_common_substring() takes two strings for inputs")
    text1_length = len(text1)
    text2_length = len(text2)
    # dp[i][j] = length of the common suffix of text1[:i] and text2[:j]
    dp = [[0] * (text2_length + 1) for _ in range(text1_length + 1)]
    ans_index = 0
    ans_length = 0
    for i in range(1, text1_length + 1):
        for j in range(1, text2_length + 1):
            if text1[i - 1] == text2[j - 1]:
                dp[i][j] = 1 + dp[i - 1][j - 1]
                if dp[i][j] > ans_length:
                    ans_index = i
                    ans_length = dp[i][j]
    return text1[ans_index - ans_length : ans_index]
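# Worked example: longest_common_substring("ababc", "abcba") == "abc";
# dp tracks common-suffix lengths and peaks at 3 when i points just past "abc".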
if __name__ == "__main__":
import doctest
doctest.testmod()
| 581 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }
    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
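# A minimal usage sketch: Pix2StructConfig() nests default sub-configs, so e.g.
# Pix2StructConfig().text_config.hidden_size == 768 and .vision_config.seq_len == 4096.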
| 581 | 1 |
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class MaskFormerSwinModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
        out_features=["stage1", "stage2", "stage3"],
        out_indices=[1, 2, 3],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
    def create_and_check_model(self, config, pixel_values, labels):
        model = MaskFormerSwinModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
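        # With the tester defaults: expected_seq_len = (32 // 2) ** 2 // 4 ** 2 = 16
        # and expected_dim = 16 * 2 ** 2 = 64, i.e. hidden states of shape (13, 16, 64).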
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = MaskFormerSwinBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [13, 16, 16, 16])
        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, [16, 32, 64])
        # verify ValueError
        with self.parent.assertRaises(ValueError):
            config.out_features = ["stem"]
            model = MaskFormerSwinBackbone(config=config)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MaskFormerSwinModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            MaskFormerSwinModel,
            MaskFormerSwinBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
    fx_compatible = False
    test_torchscript = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = MaskFormerSwinModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaskFormerSwinConfig, embed_dim=37)
    @require_torch_multi_gpu
    @unittest.skip(
        reason=(
            "`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
            " `nn.DataParallel`"
        )
    )
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)
    @unittest.skip("Swin does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip("Swin does not support feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config )
signature = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
expected_arg_names = ['pixel_values']
self.assertListEqual(arg_names[:1] , expected_arg_names )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def __lowerCamelCase ( self ):
pass
def check_hidden_states_output( self , inputs_dict , config , model_class , image_size ):
model = model_class(config )
model.to(torch_device )
model.eval()
with torch.no_grad():
outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
hidden_states = outputs.hidden_states
expected_num_layers = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(hidden_states ) , expected_num_layers )
# Swin has a different seq_length
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , image_size )
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.patch_size = 3
image_size = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
patch_size = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
inputs_dict['output_hidden_states'] = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
self.check_hidden_states_output(inputs_dict , config , model_class , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0
return t
def check_equivalence(model , tuple_inputs , dict_inputs , additional_kwargs={} ):
with torch.no_grad():
tuple_output = model(**tuple_inputs , return_dict=False , **additional_kwargs )
dict_output = model(**dict_inputs , return_dict=True , **additional_kwargs ).to_tuple()
def recursive_check(tuple_object , dict_object ):
if isinstance(tuple_object , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object , dict_object ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif isinstance(tuple_object , Dict ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(tuple_iterable_value , dict_iterable_value )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(tuple_object ) , set_nan_tensor_to_zero(dict_object ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(tuple_object ).any()} and `inf`: {torch.isinf(tuple_object ).any()}. Dict has'
f' `nan`: {torch.isnan(dict_object ).any()} and `inf`: {torch.isinf(dict_object ).any()}.'
) , )
recursive_check(tuple_output , dict_output )
for model_class in self.all_model_classes:
model = model_class(config )
model.to(torch_device )
model.eval()
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class )
dict_inputs = self._prepare_for_class(inputs_dict , model_class )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
tuple_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
dict_inputs = self._prepare_for_class(inputs_dict , model_class , return_labels=True )
check_equivalence(model , tuple_inputs , dict_inputs , {'output_hidden_states': True} )
@require_torch
class UpperCAmelCase ( unittest.TestCase , __lowercase ):
_A : Union[str, Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
_A : Union[str, Any] = MaskFormerSwinConfig
def setUp( self ):
self.model_tester = MaskFormerSwinModelTester(self )
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
batch_size = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
backbone = backbone_class(config )
backbone.to(torch_device )
backbone.eval()
outputs = backbone(**inputs_dict )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , tuple )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertEqual(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
outputs = backbone(**inputs_dict , output_hidden_states=True )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
h_batch_size , _ , h_n_channels = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
outputs = backbone(**inputs_dict , output_attentions=True )
self.assertIsNotNone(outputs.attentions )
| 126 |
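A minimal, self-contained sketch of the Swin shape arithmetic that create_and_check_model asserts above (the helper name and example values are assumptions for illustration, not part of the test file): patch embedding yields (image_size // patch_size) ** 2 tokens, and each of the len(depths) - 1 patch-merging stages quarters the token count while doubling the channel width.

def swin_output_shape(image_size: int, patch_size: int, embed_dim: int, num_stages: int) -> tuple:
    # tokens after patch embedding, then one 2x2 patch merge per remaining stage
    seq_len = ((image_size // patch_size) ** 2) // (4 ** (num_stages - 1))
    dim = int(embed_dim * 2 ** (num_stages - 1))
    return seq_len, dim

# e.g. swin_output_shape(32, 2, 16, 3) == (16, 64): 256 patches collapse to 16 tokens of width 64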
"""simple docstring"""
import os
import unittest
from transformers import LayoutLMTokenizer, LayoutLMTokenizerFast
from transformers.models.layoutlm.tokenization_layoutlm import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __lowerCamelCase ( TokenizerTesterMixin , unittest.TestCase ):
__UpperCamelCase = LayoutLMTokenizer
__UpperCamelCase = LayoutLMTokenizerFast
__UpperCamelCase = True
__UpperCamelCase = True
def setUp(self ):
'''simple docstring'''
super().setUp()
vocab_tokens = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def get_tokenizer(self , **lowerCamelCase ):
'''simple docstring'''
return LayoutLMTokenizer.from_pretrained(self.tmpdirname , **lowerCamelCase )
def get_input_output_texts(self , lowerCamelCase ):
'''simple docstring'''
_lowerCAmelCase = """UNwant\u00E9d,running"""
_lowerCAmelCase = """unwanted, running"""
return input_text, output_text
def test_full_tokenizer(self ):
'''simple docstring'''
tokenizer = self.tokenizer_class(self.vocab_file )
tokens = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(tokens , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens ) , [7, 4, 5, 10, 8, 9] )
def A__ (self ):
'''simple docstring'''
pass | 156 | 0 |
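The expected pieces asserted in the tokenizer test above come from greedy, longest-match-first WordPiece. A minimal sketch of that algorithm for a single word (the function and the toy vocab are illustrative assumptions, not the transformers implementation):

def wordpiece_tokenize(word: str, vocab: set, unk: str = "[UNK]") -> list:
    tokens, start = [], 0
    while start < len(word):
        end, cur = len(word), None
        while start < end:
            piece = word[start:end] if start == 0 else "##" + word[start:end]
            if piece in vocab:  # longest match wins
                cur = piece
                break
            end -= 1
        if cur is None:  # no piece matched: emit the unknown token
            return [unk]
        tokens.append(cur)
        start = end
    return tokens

# wordpiece_tokenize("unwanted", {"un", "##want", "##ed"}) == ["un", "##want", "##ed"]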
'''simple docstring'''
from typing import Any, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from torch.utils.data import DistributedSampler, RandomSampler
from transformers import PreTrainedModel, Trainer, logging
from transformers.integrations import is_fairscale_available
from transformers.models.fsmt.configuration_fsmt import FSMTConfig
from transformers.optimization import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.trainer_pt_utils import get_tpu_sampler
from transformers.training_args import ParallelMode
from transformers.utils import is_torch_tpu_available
if is_fairscale_available():
from fairscale.optim import OSS
logger = logging.get_logger(__name__)
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
'constant': get_constant_schedule,
'constant_w_warmup': get_constant_schedule_with_warmup,
}
class Seq2SeqTrainer ( Trainer ):
def __init__( self , config=None , data_args=None , *args , **kwargs ):
super().__init__(*args , **kwargs )
if config is None:
assert isinstance(self.model , PreTrainedModel ), (
"If no `config` is passed the model to be trained has to be of type `PreTrainedModel`, but is"
F""" {self.model.__class__}"""
)
self.config = self.model.config
else:
self.config = config
self.data_args = data_args
self.vocab_size = self.config.tgt_vocab_size if isinstance(self.config , FSMTConfig ) else self.config.vocab_size
if self.args.label_smoothing != 0 or (self.data_args is not None and self.data_args.ignore_pad_token_for_loss):
assert self.config.pad_token_id is not None, (
"Make sure that `config.pad_token_id` is correctly defined when ignoring `pad_token` for loss"
" calculation or doing label smoothing."
)
if self.config.pad_token_id is None and self.config.eos_token_id is not None:
logger.warning(
F"""The `config.pad_token_id` is `None`. Using `config.eos_token_id` = {self.config.eos_token_id} for"""
''' padding..''' )
if self.args.label_smoothing == 0:
self.loss_fn = torch.nn.CrossEntropyLoss(ignore_index=self.config.pad_token_id )
else:
# dynamically import label_smoothed_nll_loss
from utils import label_smoothed_nll_loss
self.loss_fn = label_smoothed_nll_loss
def create_optimizer_and_scheduler( self , num_training_steps ):
if self.optimizer is None:
no_decay = ['''bias''', '''LayerNorm.weight''']
optimizer_grouped_parameters = [
{
'''params''': [p for n, p in self.model.named_parameters() if not any(nd in n for nd in no_decay )],
'''weight_decay''': self.args.weight_decay,
},
{
'''params''': [p for n, p in self.model.named_parameters() if any(nd in n for nd in no_decay )],
'''weight_decay''': 0.0,
},
]
if self.args.adafactor:
optimizer_cls = Adafactor
optimizer_kwargs = {'''scale_parameter''': False, '''relative_step''': False}
else:
optimizer_cls = AdamW
optimizer_kwargs = {
'''betas''': (self.args.adam_beta1, self.args.adam_beta2),
'''eps''': self.args.adam_epsilon,
}
optimizer_kwargs['''lr'''] = self.args.learning_rate
if self.sharded_ddp:
self.optimizer = OSS(
params=optimizer_grouped_parameters , optim=optimizer_cls , **optimizer_kwargs , )
else:
self.optimizer = optimizer_cls(optimizer_grouped_parameters , **optimizer_kwargs )
if self.lr_scheduler is None:
self.lr_scheduler = self._get_lr_scheduler(num_training_steps )
else: # ignoring --lr_scheduler
logger.warning('''scheduler is passed to `Seq2SeqTrainer`, `--lr_scheduler` arg is ignored.''' )
def _get_lr_scheduler( self , num_training_steps ):
schedule_func = arg_to_scheduler[self.args.lr_scheduler]
if self.args.lr_scheduler == "constant":
scheduler = schedule_func(self.optimizer )
elif self.args.lr_scheduler == "constant_w_warmup":
scheduler = schedule_func(self.optimizer , num_warmup_steps=self.args.warmup_steps )
else:
scheduler = schedule_func(
self.optimizer , num_warmup_steps=self.args.warmup_steps , num_training_steps=num_training_steps )
return scheduler
def _get_train_sampler( self ) -> Optional[torch.utils.data.Sampler]:
if isinstance(self.train_dataset , torch.utils.data.IterableDataset ):
return None
elif is_torch_tpu_available():
return get_tpu_sampler(self.train_dataset )
else:
if self.args.sortish_sampler:
self.train_dataset.make_sortish_sampler(
self.args.per_device_train_batch_size , distributed=(self.args.parallel_mode == ParallelMode.DISTRIBUTED) , )
return (
RandomSampler(self.train_dataset )
if self.args.local_rank == -1
else DistributedSampler(self.train_dataset )
)
def _compute_loss( self , model , inputs , labels ):
if self.args.label_smoothing == 0:
if self.data_args is not None and self.data_args.ignore_pad_token_for_loss:
# force training to ignore pad token
logits = model(**inputs , use_cache=False )[0]
loss = self.loss_fn(logits.view(-1 , logits.shape[-1] ) , labels.view(-1 ) )
else:
# compute usual loss via models
loss , logits = model(**inputs , labels=labels , use_cache=False )[:2]
else:
# compute label smoothed loss
logits = model(**inputs , use_cache=False )[0]
lprobs = torch.nn.functional.log_softmax(logits , dim=-1 )
loss , _ = self.loss_fn(lprobs , labels , self.args.label_smoothing , ignore_index=self.config.pad_token_id )
return loss, logits
def compute_loss( self , model , inputs ):
labels = inputs.pop('''labels''' )
loss , _ = self._compute_loss(model , inputs , labels )
return loss
def prediction_step( self , model , inputs , prediction_loss_only , ignore_keys = None , ) -> Tuple[Optional[float], Optional[torch.Tensor], Optional[torch.Tensor]]:
inputs = self._prepare_inputs(inputs )
gen_kwargs = {
'''max_length''': self.data_args.val_max_target_length
if self.data_args is not None
else self.config.max_length,
'''num_beams''': self.data_args.eval_beams if self.data_args is not None else self.config.num_beams,
}
if self.args.predict_with_generate and not self.args.prediction_loss_only:
generated_tokens = self.model.generate(
inputs['''input_ids'''] , attention_mask=inputs['''attention_mask'''] , **gen_kwargs , )
# in case the batch is shorter than max length, the output should be padded
if generated_tokens.shape[-1] < gen_kwargs["max_length"]:
generated_tokens = self._pad_tensors_to_max_len(generated_tokens , gen_kwargs['''max_length'''] )
labels = inputs.pop('''labels''' )
with torch.no_grad():
# compute loss on predict data
loss , logits = self._compute_loss(model , inputs , labels )
loss = loss.mean().detach()
if self.args.prediction_loss_only:
return (loss, None, None)
logits = generated_tokens if self.args.predict_with_generate else logits
if labels.shape[-1] < gen_kwargs["max_length"]:
labels = self._pad_tensors_to_max_len(labels , gen_kwargs['''max_length'''] )
return (loss, logits, labels)
def _pad_tensors_to_max_len( self , tensor , max_length ):
# If PAD token is not defined at least EOS token has to be defined
pad_token_id = self.config.pad_token_id if self.config.pad_token_id is not None else self.config.eos_token_id
if pad_token_id is None:
raise ValueError(
'''Make sure that either `config.pad_token_id` or `config.eos_token_id` is defined if tensor has to be'''
F""" padded to `max_length`={max_length}""" )
padded_tensor = pad_token_id * torch.ones(
(tensor.shape[0], max_length) , dtype=tensor.dtype , device=tensor.device )
padded_tensor[:, : tensor.shape[-1]] = tensor
return padded_tensor
| 44 |
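The label_smoothed_nll_loss imported from utils above is not shown in this file; a minimal sketch of the fairseq-style formulation it is assumed to follow — epsilon of the probability mass is spread uniformly over the vocabulary, and padding positions are masked out of both terms:

import torch

def label_smoothed_nll_loss(lprobs: torch.Tensor, target: torch.Tensor, epsilon: float, ignore_index: int = -100):
    # lprobs: (..., vocab_size) log-probabilities; target: (...) gold token ids
    if target.dim() == lprobs.dim() - 1:
        target = target.unsqueeze(-1)
    pad_mask = target.eq(ignore_index)
    safe_target = target.masked_fill(pad_mask, 0)  # avoid gathering the sentinel index
    nll_loss = -lprobs.gather(dim=-1, index=safe_target).masked_fill(pad_mask, 0.0)
    smooth_loss = -lprobs.sum(dim=-1, keepdim=True).masked_fill(pad_mask, 0.0)
    nll_loss, smooth_loss = nll_loss.sum(), smooth_loss.sum()
    eps_i = epsilon / lprobs.size(-1)
    loss = (1.0 - epsilon) * nll_loss + eps_i * smooth_loss
    return loss, nll_loss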
'''simple docstring'''
def euclidean_gcd(a : int , b : int ) ->int:
while b:
a , b = b, a % b
return a
def euclidean_gcd_recursive(a : int , b : int ) ->int:
return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main() ->None:
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 , 5 )}""" )
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 , 3 )}""" )
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 , 3 )}""" )
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 , 6 )}""" )
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 , 5 )}""" )
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 , 3 )}""" )
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 , 3 )}""" )
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 , 6 )}""" )
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 , 3 )}""" )
if __name__ == "__main__":
main()
| 44 | 1 |
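A natural companion to the two gcd variants above is the extended Euclidean algorithm, which additionally returns Bezout coefficients x, y with a*x + b*y == gcd(a, b) (this helper is an addition for illustration, not part of the snippet above):

def extended_euclid(a: int, b: int) -> tuple:
    # returns (g, x, y) such that a*x + b*y == g == gcd(a, b)
    if b == 0:
        return a, 1, 0
    g, x, y = extended_euclid(b, a % b)
    return g, y, x - (a // b) * y

# extended_euclid(3, 5) == (1, 2, -1) since 3*2 + 5*(-1) == 1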
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
UpperCamelCase_ = {
"""BridgeTower/bridgetower-base""": """https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json""",
"""BridgeTower/bridgetower-base-itm-mlm""": (
"""https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json"""
),
}
class BridgeTowerVisionConfig ( PretrainedConfig ):
lowerCamelCase_ = 'bridgetower_vision_model'
def __init__( self : Optional[Any] , UpperCAmelCase__ : Union[str, Any]=768 , UpperCAmelCase__ : List[Any]=12 , UpperCAmelCase__ : List[Any]=3 , UpperCAmelCase__ : Dict=16 , UpperCAmelCase__ : Tuple=288 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : int=1E-05 , UpperCAmelCase__ : Union[str, Any]=False , UpperCAmelCase__ : List[str]=True , UpperCAmelCase__ : List[str]=False , **UpperCAmelCase__ : Dict , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : List[str] =hidden_size
lowercase : Optional[Any] =num_hidden_layers
lowercase : Dict =num_channels
lowercase : Optional[int] =patch_size
lowercase : Union[str, Any] =image_size
lowercase : Optional[int] =initializer_factor
lowercase : List[Any] =layer_norm_eps
lowercase : Dict =stop_gradient
lowercase : int =share_layernorm
lowercase : Any =remove_last_layer
@classmethod
def lowerCamelCase_ ( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
'''simple docstring'''
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
if config_dict.get('''model_type''' ) == "bridgetower":
config_dict = config_dict['''vision_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class BridgeTowerTextConfig ( PretrainedConfig ):
lowerCamelCase_ = 'bridgetower_text_model'
def __init__( self : Any , UpperCAmelCase__ : Tuple=50265 , UpperCAmelCase__ : List[Any]=768 , UpperCAmelCase__ : Optional[int]=12 , UpperCAmelCase__ : int=12 , UpperCAmelCase__ : str=1 , UpperCAmelCase__ : List[Any]=3072 , UpperCAmelCase__ : Optional[Any]="gelu" , UpperCAmelCase__ : int=0.1 , UpperCAmelCase__ : Tuple=0.1 , UpperCAmelCase__ : Union[str, Any]=514 , UpperCAmelCase__ : int=1 , UpperCAmelCase__ : Optional[int]=1E-05 , UpperCAmelCase__ : List[str]=1 , UpperCAmelCase__ : Tuple=0 , UpperCAmelCase__ : str=2 , UpperCAmelCase__ : Union[str, Any]="absolute" , UpperCAmelCase__ : Dict=True , **UpperCAmelCase__ : List[Any] , ):
'''simple docstring'''
super().__init__(**UpperCAmelCase__ )
lowercase : str =vocab_size
lowercase : Union[str, Any] =hidden_size
lowercase : int =num_hidden_layers
lowercase : Dict =num_attention_heads
lowercase : Dict =hidden_act
lowercase : Tuple =initializer_factor
lowercase : str =intermediate_size
lowercase : Optional[int] =hidden_dropout_prob
lowercase : Dict =attention_probs_dropout_prob
lowercase : Any =max_position_embeddings
lowercase : List[str] =type_vocab_size
lowercase : Any =layer_norm_eps
lowercase : Optional[int] =position_embedding_type
lowercase : Optional[Any] =use_cache
lowercase : List[str] =pad_token_id
lowercase : Optional[Any] =bos_token_id
lowercase : Union[str, Any] =eos_token_id
@classmethod
def lowerCamelCase_ ( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ):
'''simple docstring'''
config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
if config_dict.get('''model_type''' ) == "bridgetower":
config_dict = config_dict['''text_config''']
if "model_type" in config_dict and hasattr(cls , '''model_type''' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'''You are using a model of type {config_dict["model_type"]} to instantiate a model of type '''
F'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(config_dict , **kwargs )
class BridgeTowerConfig ( PretrainedConfig ):
lowerCamelCase_ = 'bridgetower'
def __init__( self : Dict , UpperCAmelCase__ : str=True , UpperCAmelCase__ : str="gelu" , UpperCAmelCase__ : Any=768 , UpperCAmelCase__ : List[Any]=1 , UpperCAmelCase__ : str=1E-05 , UpperCAmelCase__ : List[str]=False , UpperCAmelCase__ : List[str]="add" , UpperCAmelCase__ : str=12 , UpperCAmelCase__ : Any=6 , UpperCAmelCase__ : Optional[int]=False , UpperCAmelCase__ : Dict=False , UpperCAmelCase__ : str=None , UpperCAmelCase__ : Dict=None , **UpperCAmelCase__ : Tuple , ):
'''simple docstring'''
# TODO: remove this once the Hub files are updated.
lowercase : List[str] =kwargs.pop('''text_config_dict''' , UpperCAmelCase__ )
lowercase : Tuple =kwargs.pop('''vision_config_dict''' , UpperCAmelCase__ )
super().__init__(**UpperCAmelCase__ )
lowercase : Optional[Any] =share_cross_modal_transformer_layers
lowercase : Optional[Any] =hidden_act
lowercase : Dict =hidden_size
lowercase : Optional[int] =initializer_factor
lowercase : Optional[Any] =layer_norm_eps
lowercase : Tuple =share_link_tower_layers
lowercase : Optional[Any] =link_tower_type
lowercase : Dict =num_attention_heads
lowercase : List[Any] =num_hidden_layers
lowercase : Dict =tie_word_embeddings
lowercase : Any =init_layernorm_from_vision_encoder
if text_config is None:
lowercase : List[str] ={}
logger.info('''`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.''' )
if vision_config is None:
lowercase : List[str] ={}
logger.info('''`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.''' )
self.text_config = BridgeTowerTextConfig(**text_config )
self.vision_config = BridgeTowerVisionConfig(**vision_config )
@classmethod
def lowerCamelCase_ ( cls , text_config : BridgeTowerTextConfig , vision_config : BridgeTowerVisionConfig , **kwargs ):
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
def lowerCamelCase_ ( self ):
'''simple docstring'''
output = copy.deepcopy(self.__dict__ )
output['''text_config'''] = self.text_config.to_dict()
output['''vision_config'''] = self.vision_config.to_dict()
output['''model_type'''] = self.__class__.model_type
return output
| 92 |
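A short usage sketch for the three configuration classes above, assuming the upstream names BridgeTowerTextConfig / BridgeTowerVisionConfig / BridgeTowerConfig that the method bodies reference and the classmethod name from_text_vision_configs (the keyword values are illustrative):

text_config = BridgeTowerTextConfig(vocab_size=50265, hidden_size=768)
vision_config = BridgeTowerVisionConfig(hidden_size=768, num_hidden_layers=12)
config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
# to_dict() re-embeds both sub-configs, so the composite config round-trips cleanly
assert config.to_dict()["text_config"]["vocab_size"] == 50265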
import glob
import os
import random
from string import ascii_lowercase, digits
import cv2
LABEL_DIR = ""
IMG_DIR = ""
OUTPUT_DIR = ""
FLIP_TYPE = 1 # (0 is vertical, 1 is horizontal)
def main():
img_paths , annos = get_dataset(LABEL_DIR , IMG_DIR )
print("""Processing...""" )
new_images , new_annos , paths = update_image_and_anno(img_paths , annos , FLIP_TYPE )
for index, image in enumerate(new_images ):
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
letter_code = random_chars(32 )
file_name = paths[index].split(os.sep )[-1].rsplit(""".""" , 1 )[0]
file_root = F'''{OUTPUT_DIR}/{file_name}_FLIP_{letter_code}'''
cv2.imwrite(F'''/{file_root}.jpg''' , image , [cv2.IMWRITE_JPEG_QUALITY, 85] )
print(F'''Success {index+1}/{len(new_images )} with {file_name}''' )
annos_list = []
for anno in new_annos[index]:
obj = F'''{anno[0]} {anno[1]} {anno[2]} {anno[3]} {anno[4]}'''
annos_list.append(obj )
with open(F'''/{file_root}.txt''' , """w""" ) as outfile:
outfile.write("""\n""".join(line for line in annos_list ) )
def get_dataset(label_dir : str , img_dir : str ):
img_paths = []
labels = []
for label_file in glob.glob(os.path.join(label_dir , """*.txt""" ) ):
label_name = label_file.split(os.sep )[-1].rsplit(""".""" , 1 )[0]
with open(label_file ) as in_file:
obj_lists = in_file.readlines()
img_path = os.path.join(img_dir , F'''{label_name}.jpg''' )
boxes = []
for obj_list in obj_lists:
obj = obj_list.rstrip("""\n""" ).split(""" """ )
boxes.append(
[
int(obj[0] ),
float(obj[1] ),
float(obj[2] ),
float(obj[3] ),
float(obj[4] ),
] )
if not boxes:
continue
img_paths.append(img_path )
labels.append(boxes )
return img_paths, labels
def update_image_and_anno(img_list : list , anno_list : list , flip_type : int = 1 ):
new_imgs_list = []
new_annos_lists = []
path_list = []
for idx in range(len(img_list ) ):
new_annos = []
path = img_list[idx]
path_list.append(path )
img_annos = anno_list[idx]
img = cv2.imread(path )
if flip_type == 1:
new_img = cv2.flip(img , flip_type )
for bbox in img_annos:
x_center_new = 1 - bbox[1]
new_annos.append([bbox[0], x_center_new, bbox[2], bbox[3], bbox[4]] )
elif flip_type == 0:
new_img = cv2.flip(img , flip_type )
for bbox in img_annos:
y_center_new = 1 - bbox[2]
new_annos.append([bbox[0], bbox[1], y_center_new, bbox[3], bbox[4]] )
new_annos_lists.append(new_annos )
new_imgs_list.append(new_img )
return new_imgs_list, new_annos_lists, path_list
def random_chars(number_char : int = 32 ) -> str:
assert number_char > 1, "The number of character should greater than 1"
letter_code = ascii_lowercase + digits
return "".join(random.choice(letter_code ) for _ in range(number_char ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 654 | 0 |
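A quick check of the YOLO-format flip rule applied in update_image_and_anno above: boxes are (class, x_center, y_center, width, height) with coordinates normalized to [0, 1], so a horizontal flip only needs x_center -> 1 - x_center (and a vertical flip, y_center -> 1 - y_center):

box = [0, 0.25, 0.40, 0.10, 0.20]  # class, x_center, y_center, width, height
flipped = [box[0], 1 - box[1], box[2], box[3], box[4]]
assert flipped == [0, 0.75, 0.40, 0.10, 0.20]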
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class TFTransfoXLModelTester :
'''simple docstring'''
def __init__( self , parent , ):
self.parent = parent
self.batch_size = 13
self.seq_length = 7
self.mem_len = 30
self.key_length = self.seq_length + self.mem_len
self.clamp_len = 15
self.is_training = True
self.use_labels = True
self.vocab_size = 99
self.cutoffs = [10, 50, 80]
self.hidden_size = 32
self.d_embed = 32
self.num_attention_heads = 4
self.d_head = 8
self.d_inner = 128
self.div_val = 2
self.num_hidden_layers = 2
self.scope = None
self.seed = 1
self.eos_token_id = 0
self.num_labels = 3
self.pad_token_id = self.vocab_size - 1
self.init_range = 0.01
def prepare_config_and_inputs( self ):
input_ids_1 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
input_ids_2 = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
config = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_1, input_ids_2, lm_labels)
def set_seed( self ):
random.seed(self.seed )
tf.random.set_seed(self.seed )
def create_and_check_transfo_xl_model( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLModel(config )
hidden_states_1 , mems_1 = model(input_ids_1 ).to_tuple()
inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1}
hidden_states_2 , mems_2 = model(inputs ).to_tuple()
self.parent.assertEqual(hidden_states_1.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_2.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_lm_head( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLLMHeadModel(config )
lm_logits_1 , mems_1 = model(input_ids_1 ).to_tuple()
inputs = {'''input_ids''': input_ids_1, '''labels''': lm_labels}
_ , mems_1 = model(inputs ).to_tuple()
lm_logits_2 , mems_2 = model([input_ids_2, mems_1] ).to_tuple()
inputs = {'''input_ids''': input_ids_2, '''mems''': mems_1, '''labels''': lm_labels}
_ , mems_2 = model(inputs ).to_tuple()
self.parent.assertEqual(lm_logits_1.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_1] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertEqual(lm_logits_2.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_2] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
def create_and_check_transfo_xl_for_sequence_classification( self , config , input_ids_1 , input_ids_2 , lm_labels ):
model = TFTransfoXLForSequenceClassification(config )
result = model(input_ids_1 )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def prepare_config_and_inputs_for_common( self ):
config_and_inputs = self.prepare_config_and_inputs()
(config, input_ids_1, input_ids_2, lm_labels) = config_and_inputs
inputs_dict = {'''input_ids''': input_ids_1}
return config, inputs_dict
@require_tf
class UpperCamelCase ( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
lowercase : str =(
(TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
)
lowercase : Dict =() if is_tf_available() else ()
lowercase : List[str] =(
{
"""feature-extraction""": TFTransfoXLModel,
"""text-classification""": TFTransfoXLForSequenceClassification,
"""text-generation""": TFTransfoXLLMHeadModel,
"""zero-shot""": TFTransfoXLForSequenceClassification,
}
if is_tf_available()
else {}
)
# TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
lowercase : Optional[int] =False
lowercase : Tuple =False
lowercase : Dict =False
lowercase : Dict =False
def UpperCamelCase ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
if pipeline_test_casse_name == "TextGenerationPipelineTests":
# Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
# `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
# tokenizer.
return True
return False
def setUp( self ):
self.model_tester = TFTransfoXLModelTester(self )
self.config_tester = ConfigTester(self , config_class=TransfoXLConfig , d_embed=37 )
def UpperCamelCase ( self ):
self.config_tester.run_common_tests()
def UpperCamelCase ( self ):
self.model_tester.set_seed()
lowercase_ :str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_model(*UpperCamelCase_ )
def UpperCamelCase ( self ):
self.model_tester.set_seed()
lowercase_ :Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_lm_head(*UpperCamelCase_ )
def UpperCamelCase ( self ):
lowercase_ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*UpperCamelCase_ )
def UpperCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_other_models_with_output_ebd = [TFTransfoXLForSequenceClassification]
for model_class in self.all_model_classes:
model = model_class(config )
assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
if model_class in list_other_models_with_output_ebd:
x = model.get_output_embeddings()
assert isinstance(x , tf.keras.layers.Layer )
name = model.get_bias()
assert name is None
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
def UpperCamelCase ( self ):
# TODO JP: Make TransfoXL XLA compliant
pass
@slow
def UpperCamelCase ( self ):
for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowercase_ :Dict = TFTransfoXLModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@unittest.skip(reason='''This model doesn\'t play well with fit() due to not returning a single loss.''' )
def UpperCamelCase ( self ):
pass
@require_tf
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip('''Skip test until #12651 is resolved.''' )
@slow
def UpperCamelCase ( self ):
lowercase_ :Any = TFTransfoXLLMHeadModel.from_pretrained('''transfo-xl-wt103''' )
# fmt: off
lowercase_ :List[str] = tf.convert_to_tensor([[33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0]] , dtype=tf.intaa ) # noqa: E231
# fmt: on
# In 1991 , the remains of Russian Tsar Nicholas II and his family
# ( except for Alexei and Maria ) are discovered .
# The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
# remainder of the story . 1883 Western Siberia ,
# a young Grigori Rasputin is asked by his father and a group of men to perform magic .
# Rasputin has a vision and denounces one of the men as a horse thief . Although his
# father initially slaps him for making such an accusation , Rasputin watches as the
# man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
# the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
# with people , even a bishop , begging for his blessing . <eod> </s> <eos>
# fmt: off
lowercase_ :List[Any] = [33,1297,2,1,1009,4,1109,1_1739,4762,358,5,25,245,22,1706,17,2_0098,5,3215,21,37,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,6224,831,1_6002,2,8,603,7_8967,2_9546,23,803,20,25,416,5,8,232,4,277,6,1855,4601,3,2_9546,54,8,3609,5,5_7211,49,4,1,277,18,8,1755,1_5691,3,341,25,416,693,4_2573,71,17,401,94,31,1_7919,2,2_9546,7873,18,1,435,23,1_1011,755,5,5167,3,7983,98,84,2,2_9546,3267,8,3609,4,1,4865,1075,2,6087,71,6,346,8,5854,3,2_9546,824,1400,1868,2,19,160,2,311,8,5496,2,2_0920,17,25,1_5097,3,24,24,0,33,1,1857,2,1,1009,4,1109,1_1739,4762,358,5,25,245,28,1110,3,13,1041,4,24,603,490,2,7_1477,2_0098,10_4447,2,2_0961,1,2604,4,1,329,3,0] # noqa: E231
# fmt: on
# In 1991, the remains of Russian Tsar Nicholas II and his family (
# except for Alexei and Maria ) are discovered. The voice of young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
# 1883 Western Siberia, a young Grigori Rasputin is asked by his father
# and a group of men to perform magic. Rasputin has a vision and
# denounces one of the men as a horse thief. Although his father initially
# slaps him for making such an accusation, Rasputin watches as the man
# is chased outside and beaten. Twenty years later, Rasputin sees a vision
# of the Virgin Mary, prompting him to become a priest.
# Rasputin quickly becomes famous, with people, even a bishop, begging for
# his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
# Nicholas II and his family were discovered. The voice of <unk> young son,
# Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
lowercase_ :Any = model.generate(UpperCamelCase_ , max_length=200 , do_sample=UpperCamelCase_ )
self.assertListEqual(output_ids[0].numpy().tolist() , UpperCamelCase_ )
| 713 |
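An illustrative sketch (a numpy stand-in, not the TF implementation) of the Transfo-XL memory recurrence the shapes above encode: each layer caches the last mem_len hidden states time-major, so attention over a new segment can look back across mem_len + seq_length positions.

import numpy as np

def update_mems(prev_mem: np.ndarray, hidden: np.ndarray, mem_len: int) -> np.ndarray:
    # prev_mem: (mem_len, batch, d_model); hidden: (seq_len, batch, d_model)
    cat = np.concatenate([prev_mem, hidden], axis=0)
    return cat[-mem_len:]  # keep only the most recent mem_len states

mem = np.zeros((30, 13, 32))       # (mem_len, batch_size, hidden_size) as in the tester above
new_hidden = np.ones((7, 13, 32))  # (seq_length, batch_size, hidden_size)
assert update_mems(mem, new_hidden, 30).shape == (30, 13, 32)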
import gc
import random
import unittest
import numpy as np
import torch
from transformers import XLMRobertaTokenizer
from diffusers import (
AltDiffusionImgaImgPipeline,
AutoencoderKL,
PNDMScheduler,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.pipelines.alt_diffusion.modeling_roberta_series import (
RobertaSeriesConfig,
RobertaSeriesModelWithTransformation,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def dummy_image( self ):
batch_size = 1
num_channels = 3
sizes = (32, 32)
image = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(torch_device )
return image
@property
def dummy_cond_unet( self ):
torch.manual_seed(0 )
model = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('''DownBlock2D''', '''CrossAttnDownBlock2D''') , up_block_types=('''CrossAttnUpBlock2D''', '''UpBlock2D''') , cross_attention_dim=32 , )
return model
@property
def dummy_vae( self ):
torch.manual_seed(0 )
model = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['''DownEncoderBlock2D''', '''DownEncoderBlock2D'''] , up_block_types=['''UpDecoderBlock2D''', '''UpDecoderBlock2D'''] , latent_channels=4 , )
return model
@property
def dummy_text_encoder( self ):
torch.manual_seed(0 )
config = RobertaSeriesConfig(
hidden_size=32 , project_dim=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=5006 , )
return RobertaSeriesModelWithTransformation(config )
@property
def dummy_extractor( self ):
def extract(*args , **kwargs ):
class Out :
'''simple docstring'''
def __init__( self ):
self.pixel_values = torch.ones([0] )
def to( self , device ):
self.pixel_values.to(device )
return self
return Out()
return extract
def UpperCamelCase ( self ):
lowercase_ :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
lowercase_ :Dict = self.dummy_cond_unet
lowercase_ :Optional[Any] = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Union[str, Any] = self.dummy_vae
lowercase_ :Any = self.dummy_text_encoder
lowercase_ :List[Any] = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :List[str] = 77
lowercase_ :int = self.dummy_image.to(UpperCamelCase_ )
lowercase_ :Optional[Any] = init_image / 2 + 0.5
# make sure here that pndm scheduler skips prk
lowercase_ :str = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[str] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :int = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :Any = '''A painting of a squirrel eating a burger'''
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , )
lowercase_ :Dict = output.images
lowercase_ :Tuple = torch.Generator(device=UpperCamelCase_ ).manual_seed(0 )
lowercase_ :Optional[int] = alt_pipe(
[prompt] , generator=UpperCamelCase_ , guidance_scale=6.0 , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , return_dict=UpperCamelCase_ , )[0]
lowercase_ :Optional[int] = image[0, -3:, -3:, -1]
lowercase_ :Dict = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
lowercase_ :int = np.array([0.4427, 0.3731, 0.4249, 0.4941, 0.4546, 0.4148, 0.4193, 0.4666, 0.4499] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5E-3
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 5E-3
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :List[str] = self.dummy_cond_unet
lowercase_ :Dict = PNDMScheduler(skip_prk_steps=UpperCamelCase_ )
lowercase_ :Tuple = self.dummy_vae
lowercase_ :Dict = self.dummy_text_encoder
lowercase_ :Tuple = XLMRobertaTokenizer.from_pretrained('''hf-internal-testing/tiny-xlm-roberta''' )
lowercase_ :str = 77
lowercase_ :str = self.dummy_image.to(UpperCamelCase_ )
# put models in fp16
lowercase_ :Union[str, Any] = unet.half()
lowercase_ :Union[str, Any] = vae.half()
lowercase_ :List[str] = bert.half()
# make sure here that pndm scheduler skips prk
lowercase_ :List[Any] = AltDiffusionImgaImgPipeline(
unet=UpperCamelCase_ , scheduler=UpperCamelCase_ , vae=UpperCamelCase_ , text_encoder=UpperCamelCase_ , tokenizer=UpperCamelCase_ , safety_checker=UpperCamelCase_ , feature_extractor=self.dummy_extractor , )
lowercase_ :List[Any] = VaeImageProcessor(vae_scale_factor=alt_pipe.vae_scale_factor , do_normalize=UpperCamelCase_ )
lowercase_ :List[str] = alt_pipe.to(UpperCamelCase_ )
alt_pipe.set_progress_bar_config(disable=UpperCamelCase_ )
lowercase_ :str = '''A painting of a squirrel eating a burger'''
lowercase_ :Union[str, Any] = torch.manual_seed(0 )
lowercase_ :Any = alt_pipe(
[prompt] , generator=UpperCamelCase_ , num_inference_steps=2 , output_type='''np''' , image=UpperCamelCase_ , ).images
assert image.shape == (1, 32, 32, 3)
@unittest.skipIf(torch_device != '''cuda''' , '''This test requires a GPU''' )
def UpperCamelCase ( self ):
lowercase_ :Optional[int] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
# resize to resolution that is divisible by 8 but not 16 or 32
lowercase_ :Optional[Any] = init_image.resize((760, 504) )
lowercase_ :List[str] = '''BAAI/AltDiffusion'''
lowercase_ :Optional[Any] = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :Optional[Any] = '''A fantasy landscape, trending on artstation'''
lowercase_ :Optional[Any] = torch.manual_seed(0 )
lowercase_ :str = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :Optional[Any] = output.images[0]
lowercase_ :Optional[int] = image[255:258, 383:386, -1]
assert image.shape == (504, 760, 3)
lowercase_ :Any = np.array([0.9358, 0.9397, 0.9599, 0.9901, 1.0000, 1.0000, 0.9882, 1.0000, 1.0000] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@slow
@require_torch_gpu
class UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
def UpperCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def UpperCamelCase ( self ):
lowercase_ :Union[str, Any] = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/img2img/sketch-mountains-input.jpg''' )
lowercase_ :Any = init_image.resize((768, 512) )
lowercase_ :List[str] = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/img2img/fantasy_landscape_alt.npy''' )
lowercase_ :List[Any] = '''BAAI/AltDiffusion'''
lowercase_ :Tuple = AltDiffusionImgaImgPipeline.from_pretrained(
UpperCamelCase_ , safety_checker=UpperCamelCase_ , )
pipe.to(UpperCamelCase_ )
pipe.set_progress_bar_config(disable=UpperCamelCase_ )
pipe.enable_attention_slicing()
lowercase_ :List[str] = '''A fantasy landscape, trending on artstation'''
lowercase_ :Optional[int] = torch.manual_seed(0 )
lowercase_ :Tuple = pipe(
prompt=UpperCamelCase_ , image=UpperCamelCase_ , strength=0.75 , guidance_scale=7.5 , generator=UpperCamelCase_ , output_type='''np''' , )
lowercase_ :int = output.images[0]
assert image.shape == (512, 768, 3)
# img2img is flaky across GPUs even in fp32, so using MAE here
assert np.abs(expected_image - image ).max() < 1E-2
| 441 | 0 |
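The tolerance pattern these pipeline tests repeat, in isolation: compare a small deterministic corner slice of the generated image against stored reference values with a max-abs-difference bound (the helper name is an assumption for illustration):

import numpy as np

def slices_close(image: np.ndarray, expected_slice: np.ndarray, atol: float = 5e-3) -> bool:
    # image: (batch, height, width, channels); take the 3x3 corner of the last channel
    image_slice = image[0, -3:, -3:, -1]
    return float(np.abs(image_slice.flatten() - expected_slice).max()) < atol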
'''simple docstring'''
import torch
from diffusers import StableDiffusionPipeline
model_id = '''path-to-your-trained-model'''
pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16).to('cuda')
prompt = '''A photo of sks dog in a bucket'''
image = pipe(prompt, num_inference_steps=50, guidance_scale=7.5).images[0]
image.save('dog-bucket.png')
| 135 | '''simple docstring'''
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
)
| 107 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"configuration_llama": ["LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP", "LlamaConfig"],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE__ = ["LlamaTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_llama"] = [
"LlamaForCausalLM",
"LlamaModel",
"LlamaPreTrainedModel",
"LlamaForSequenceClassification",
]
if TYPE_CHECKING:
from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama import LlamaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_llama_fast import LlamaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
import sys
sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 702 |
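A minimal sketch of the lazy-module pattern above (a toy re-implementation under assumed semantics, not the transformers _LazyModule): submodules named in _import_structure are imported only when one of their attributes is first accessed.

import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        # map each exported attribute to the submodule that defines it
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}
        self.__all__ = list(self._attr_to_module)

    def __getattr__(self, attr: str):
        if attr not in self._attr_to_module:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        # the submodule is imported only on first access
        module = importlib.import_module(f"{self.__name__}.{self._attr_to_module[attr]}")
        return getattr(module, attr)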
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
def env_command_parser(subparsers=None ):
'''simple docstring'''
if subparsers is not None:
parser = subparsers.add_parser("env" )
else:
parser = argparse.ArgumentParser("Accelerate env command" )
parser.add_argument(
"--config_file" ,default=None ,help="The config file to use for the default values in the launching script." )
if subparsers is not None:
parser.set_defaults(func=env_command )
return parser
def env_command(args ):
'''simple docstring'''
pt_version = torch.__version__
pt_cuda_available = torch.cuda.is_available()
pt_xpu_available = is_xpu_available()
pt_npu_available = is_npu_available()
accelerate_config = "Not found"
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(default_config_file ):
accelerate_config = load_config_from_file(args.config_file ).to_dict()
info = {
"`Accelerate` version": version,
"Platform": platform.platform(),
"Python version": platform.python_version(),
"Numpy version": np.__version__,
"PyTorch version (GPU?)": f'''{pt_version} ({pt_cuda_available})''',
"PyTorch XPU available": str(pt_xpu_available ),
"PyTorch NPU available": str(pt_npu_available ),
"System RAM": f'''{psutil.virtual_memory().total / 1_024 ** 3:.2f} GB''',
}
if pt_cuda_available:
info["GPU type"] = torch.cuda.get_device_name()
print("\nCopy-and-paste the text below in your GitHub issue\n" )
print("\n".join([f'''- {prop}: {val}''' for prop, val in info.items()] ) )
print("- `Accelerate` default config:" if args.config_file is None else "- `Accelerate` config passed:" )
accelerate_config_str = (
"\n".join([f'''\t- {prop}: {val}''' for prop, val in accelerate_config.items()] )
if isinstance(accelerate_config ,dict )
else f'''\t{accelerate_config}'''
)
print(accelerate_config_str )
info["`Accelerate` configs"] = accelerate_config
return info
def main():
'''simple docstring'''
parser = env_command_parser()
args = parser.parse_args()
env_command(args )
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 539 | 0 |
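An assumed usage sketch for the command defined above (standalone it runs as python -m accelerate.commands.env; wired through a parent parser it looks like this):

import argparse

root = argparse.ArgumentParser("accelerate")
subparsers = root.add_subparsers()
env_command_parser(subparsers=subparsers)  # registers the `env` subcommand
args = root.parse_args(["env"])
args.func(args)                            # prints the environment report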
'''simple docstring'''
import unittest
import numpy as np
def schur_complement( mat_a : np.ndarray , mat_b : np.ndarray , mat_c : np.ndarray , pseudo_inv : np.ndarray | None = None , ) -> np.ndarray:
'''simple docstring'''
shape_a = np.shape(mat_a )
shape_b = np.shape(mat_b )
shape_c = np.shape(mat_c )
if shape_a[0] != shape_b[0]:
msg = (
'Expected the same number of rows for A and B. '
F'''Instead found A of size {shape_a} and B of size {shape_b}'''
)
raise ValueError(msg )
if shape_b[1] != shape_c[1]:
msg = (
'Expected the same number of columns for B and C. '
F'''Instead found B of size {shape_b} and C of size {shape_c}'''
)
raise ValueError(msg )
a_inv = pseudo_inv
if a_inv is None:
try:
a_inv = np.linalg.inv(mat_a )
except np.linalg.LinAlgError:
raise ValueError(
'Input matrix A is not invertible. Cannot compute Schur complement.' )
return mat_c - mat_b.T @ a_inv @ mat_b
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def __lowerCamelCase ( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1], [6, 3]] )
s = schur_complement(a , b , c )
input_matrix = np.block([[a, b], [b.T, c]] )
det_x = np.linalg.det(input_matrix )
det_a = np.linalg.det(a )
det_s = np.linalg.det(s )
self.assertAlmostEqual(det_x , det_a * det_s )
def __lowerCamelCase ( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1], [6, 3]] )
with self.assertRaises(ValueError ):
schur_complement(a , b , c )
def __lowerCamelCase ( self ) -> None:
a = np.array([[1, 2, 1], [2, 1, 2], [3, 2, 4]] )
b = np.array([[0, 3], [3, 0], [2, 3]] )
c = np.array([[2, 1, 3], [6, 3, 5]] )
with self.assertRaises(ValueError ):
schur_complement(a , b , c )
if __name__ == "__main__":
import doctest
doctest.testmod()
unittest.main()
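The first test above leans on the block-matrix determinant identity for the Schur complement S = C - B^T A^{-1} B, stated here for invertible A:

\det\begin{pmatrix} A & B \\ B^{\mathsf{T}} & C \end{pmatrix} = \det(A)\,\det\!\left(C - B^{\mathsf{T}} A^{-1} B\right)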
| 209 | '''simple docstring'''
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import List
import timm
import torch
import torch.nn as nn
from huggingface_hub import hf_hub_download
from torch import Tensor
from transformers import AutoImageProcessor, ResNetConfig, ResNetForImageClassification
from transformers.utils import logging
logging.set_verbosity_info()
UpperCamelCase_ = logging.get_logger()
@dataclass
class _SCREAMING_SNAKE_CASE:
A_ : nn.Module
A_ : List[nn.Module] = field(default_factory=_SCREAMING_SNAKE_CASE )
A_ : list = field(default_factory=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self : Union[str, Any] , UpperCamelCase_ : Optional[Any] , UpperCamelCase_ : Tensor , UpperCamelCase_ : Tensor ) -> Optional[int]:
        SCREAMING_SNAKE_CASE__ :Optional[Any] = len(list(m.modules() ) ) == 1 or isinstance(UpperCamelCase_ , nn.Conv2d ) or isinstance(UpperCamelCase_ , nn.BatchNorm2d )
if has_not_submodules:
self.traced.append(UpperCamelCase_ )
def __call__( self : Tuple , UpperCamelCase_ : Tensor ) -> int:
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(UpperCamelCase_ )
[x.remove() for x in self.handles]
return self
@property
def __lowerCamelCase ( self : Tuple ) -> str:
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda UpperCamelCase_ : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class _SCREAMING_SNAKE_CASE:
A_ : nn.Module
A_ : nn.Module
A_ : int = 0
A_ : List = field(default_factory=_SCREAMING_SNAKE_CASE )
A_ : List = field(default_factory=_SCREAMING_SNAKE_CASE )
def __call__( self : List[Any] , UpperCamelCase_ : Tensor ) -> int:
SCREAMING_SNAKE_CASE__ :List[str] = Tracker(self.dest )(UpperCamelCase_ ).parametrized
SCREAMING_SNAKE_CASE__ :Any = Tracker(self.src )(UpperCamelCase_ ).parametrized
SCREAMING_SNAKE_CASE__ :Dict = list(filter(lambda UpperCamelCase_ : type(UpperCamelCase_ ) not in self.src_skip , UpperCamelCase_ ) )
SCREAMING_SNAKE_CASE__ :str = list(filter(lambda UpperCamelCase_ : type(UpperCamelCase_ ) not in self.dest_skip , UpperCamelCase_ ) )
if len(UpperCamelCase_ ) != len(UpperCamelCase_ ):
raise Exception(
f'''Numbers of operations are different. Source module has {len(UpperCamelCase_ )} operations while'''
f''' destination module has {len(UpperCamelCase_ )}.''' )
for dest_m, src_m in zip(UpperCamelCase_ , UpperCamelCase_ ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
                print(f'''Transferred from={src_m} to={dest_m}''' )
def lowerCamelCase ( UpperCAmelCase__ : str , UpperCAmelCase__ : ResNetConfig , UpperCAmelCase__ : Path , UpperCAmelCase__ : bool = True ) -> Union[str, Any]:
'''simple docstring'''
print(F'''Converting {name}...''' )
with torch.no_grad():
SCREAMING_SNAKE_CASE__ :str = timm.create_model(UpperCAmelCase__ , pretrained=UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :List[str] = ResNetForImageClassification(UpperCAmelCase__ ).eval()
SCREAMING_SNAKE_CASE__ :Dict = ModuleTransfer(src=UpperCAmelCase__ , dest=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[Any] = torch.randn((1, 3, 2_2_4, 2_2_4) )
module_transfer(UpperCAmelCase__ )
assert torch.allclose(from_model(UpperCAmelCase__ ) , our_model(UpperCAmelCase__ ).logits ), "The model logits don't match the original one."
SCREAMING_SNAKE_CASE__ :List[str] = F'''resnet{"-".join(name.split("resnet" ) )}'''
print(UpperCAmelCase__ )
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add model' , use_temp_dir=UpperCAmelCase__ , )
# we can use the convnext one
SCREAMING_SNAKE_CASE__ :int = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k' )
image_processor.push_to_hub(
repo_path_or_name=save_directory / checkpoint_name , commit_message='Add image processor' , use_temp_dir=UpperCAmelCase__ , )
print(F'''Pushed {checkpoint_name}''' )
def lowerCamelCase ( UpperCAmelCase__ : Path , UpperCAmelCase__ : str = None , UpperCAmelCase__ : bool = True ) -> int:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ :List[str] = 'imagenet-1k-id2label.json'
SCREAMING_SNAKE_CASE__ :Any = 1_0_0_0
SCREAMING_SNAKE_CASE__ :List[Any] = (1, num_labels)
SCREAMING_SNAKE_CASE__ :List[str] = 'huggingface/label-files'
SCREAMING_SNAKE_CASE__ :Union[str, Any] = num_labels
SCREAMING_SNAKE_CASE__ :Tuple = json.load(open(hf_hub_download(UpperCAmelCase__ , UpperCAmelCase__ , repo_type='dataset' ) , 'r' ) )
    SCREAMING_SNAKE_CASE__ :int = {int(k): v for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :str = idalabel
SCREAMING_SNAKE_CASE__ :Any = {v: k for k, v in idalabel.items()}
SCREAMING_SNAKE_CASE__ :Optional[Any] = partial(UpperCAmelCase__ , num_labels=UpperCAmelCase__ , idalabel=UpperCAmelCase__ , labelaid=UpperCAmelCase__ )
SCREAMING_SNAKE_CASE__ :List[str] = {
'resnet18': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet26': ImageNetPreTrainedConfig(
depths=[2, 2, 2, 2] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet34': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[6_4, 1_2_8, 2_5_6, 5_1_2] , layer_type='basic' ),
'resnet50': ImageNetPreTrainedConfig(
depths=[3, 4, 6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet101': ImageNetPreTrainedConfig(
depths=[3, 4, 2_3, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
'resnet152': ImageNetPreTrainedConfig(
depths=[3, 8, 3_6, 3] , hidden_sizes=[2_5_6, 5_1_2, 1_0_2_4, 2_0_4_8] , layer_type='bottleneck' ),
}
if model_name:
convert_weight_and_push(UpperCAmelCase__ , names_to_config[model_name] , UpperCAmelCase__ , UpperCAmelCase__ )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
return config, expected_shape
if __name__ == "__main__":
UpperCamelCase_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--model_name''',
default=None,
type=str,
help=(
'''The name of the model you wish to convert, it must be one of the supported resnet* architecture,'''
            ''' currently: resnet18,26,34,50,101,152. If `None`, all of them will be converted.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=Path,
required=True,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
default=True,
type=bool,
required=False,
help='''If True, push model and image processor to the hub.''',
)
UpperCamelCase_ = parser.parse_args()
UpperCamelCase_ = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
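# --- Added example (editor's sketch, not part of the original file) ---
# The Tracker class above records leaf modules via forward hooks so that
# ModuleTransfer can copy weights between two architectures in execution
# order. A minimal standalone version of that hook-tracing idea:
import torch
import torch.nn as nn

def trace_leaf_modules(module: nn.Module, x: torch.Tensor) -> list:
    traced, handles = [], []
    for m in module.modules():
        if len(list(m.children())) == 0:  # leaf module: no submodules
            handles.append(m.register_forward_hook(lambda mod, inp, out: traced.append(mod)))
    module(x)
    for h in handles:
        h.remove()
    return traced  # leaf modules in the order the forward pass executed them

print(trace_leaf_modules(nn.Sequential(nn.Conv2d(3, 8, 3), nn.ReLU()), torch.randn(1, 3, 8, 8)))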
| 209 | 1 |
'''simple docstring'''
import asyncio
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
from distutils.util import strtobool
from functools import partial
from pathlib import Path
from typing import List, Union
from unittest import mock
import torch
from ..state import AcceleratorState, PartialState
from ..utils import (
gather,
is_bnb_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_mps_available,
is_safetensors_available,
is_tensorboard_available,
is_torch_version,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
def A ( __UpperCamelCase , __UpperCamelCase=False ) -> List[Any]:
try:
A__ = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
A__ = default
else:
# KEY is set, convert it to True or False.
try:
A__ = strtobool(__lowerCAmelCase )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(f'''If set, {key} must be yes or no.''' )
return _value
SCREAMING_SNAKE_CASE__ = parse_flag_from_env('''RUN_SLOW''', default=False)
def A ( __UpperCamelCase ) -> Any:
return unittest.skip('Test was skipped' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> str:
return unittest.skipUnless(_run_slow_tests , 'test is slow' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Optional[int]:
return unittest.skipUnless(not torch.cuda.is_available() , 'test requires only a CPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Optional[int]:
return unittest.skipUnless(torch.cuda.is_available() , 'test requires a GPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> List[Any]:
return unittest.skipUnless(is_xpu_available() , 'test requires a XPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Tuple:
return unittest.skipUnless(is_mps_available() , 'test requires a `mps` backend support in `torch`' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Optional[int]:
return unittest.skipUnless(
is_transformers_available() and is_datasets_available() , 'test requires the Hugging Face suite' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Any:
return unittest.skipUnless(is_bnb_available() , 'test requires the bitsandbytes library' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> int:
return unittest.skipUnless(is_tpu_available() , 'test requires TPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> List[Any]:
return unittest.skipUnless(torch.cuda.device_count() == 1 , 'test requires a GPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> int:
return unittest.skipUnless(torch.xpu.device_count() == 1 , 'test requires a XPU' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> List[Any]:
return unittest.skipUnless(torch.cuda.device_count() > 1 , 'test requires multiple GPUs' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Any:
return unittest.skipUnless(torch.xpu.device_count() > 1 , 'test requires multiple XPUs' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> int:
return unittest.skipUnless(is_safetensors_available() , 'test requires safetensors' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Any:
return unittest.skipUnless(is_deepspeed_available() , 'test requires DeepSpeed' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Optional[Any]:
return unittest.skipUnless(is_torch_version('>=' , '1.12.0' ) , 'test requires torch version >= 1.12.0' )(__lowerCAmelCase )
def A ( __UpperCamelCase=None , __UpperCamelCase=None ) -> Dict:
if test_case is None:
return partial(__lowerCAmelCase , version=__lowerCAmelCase )
return unittest.skipUnless(is_torch_version('>=' , __lowerCAmelCase ) , f'''test requires torch version >= {version}''' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> Dict:
return unittest.skipUnless(is_tensorboard_available() , 'test requires Tensorboard' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> List[str]:
return unittest.skipUnless(is_wandb_available() , 'test requires wandb' )(__lowerCAmelCase )
def A ( __UpperCamelCase ) -> List[Any]:
return unittest.skipUnless(is_comet_ml_available() , 'test requires comet_ml' )(__lowerCAmelCase )
SCREAMING_SNAKE_CASE__ = (
any([is_wandb_available(), is_tensorboard_available()]) and not is_comet_ml_available()
)
def A ( __UpperCamelCase ) -> Union[str, Any]:
return unittest.skipUnless(
_atleast_one_tracker_available , 'test requires at least one tracker to be available and for `comet_ml` to not be installed' , )(__lowerCAmelCase )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
A__ : Union[str, Any] = True
@classmethod
def _a ( cls : int ):
"""simple docstring"""
A__ = tempfile.mkdtemp()
@classmethod
def _a ( cls : Any ):
"""simple docstring"""
if os.path.exists(cls.tmpdir ):
shutil.rmtree(cls.tmpdir )
def _a ( self : Optional[Any] ):
"""simple docstring"""
if self.clear_on_setup:
for path in Path(self.tmpdir ).glob('**/*' ):
if path.is_file():
path.unlink()
elif path.is_dir():
shutil.rmtree(_a )
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Dict ):
"""simple docstring"""
super().tearDown()
# Reset the state of the AcceleratorState singleton.
AcceleratorState._reset_state()
PartialState._reset_state()
class __lowerCAmelCase ( unittest.TestCase ):
"""simple docstring"""
def _a ( self : Union[str, Any] , _snake_case : Any ):
"""simple docstring"""
A__ = mocks if isinstance(_a , (tuple, list) ) else [mocks]
for m in self.mocks:
m.start()
self.addCleanup(m.stop )
def A ( __UpperCamelCase ) -> List[str]:
A__ = AcceleratorState()
A__ = tensor[None].clone().to(state.device )
A__ = gather(__lowerCAmelCase ).cpu()
A__ = tensor[0].cpu()
for i in range(tensors.shape[0] ):
if not torch.equal(tensors[i] , __lowerCAmelCase ):
return False
return True
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self : Tuple , _snake_case : List[Any] , _snake_case : int , _snake_case : Tuple ):
"""simple docstring"""
A__ = returncode
A__ = stdout
A__ = stderr
async def A ( __UpperCamelCase , __UpperCamelCase ) -> str:
while True:
A__ = await stream.readline()
if line:
callback(__lowerCAmelCase )
else:
break
async def A ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=False , __UpperCamelCase=False ) -> _RunOutput:
if echo:
print('\nRunning: ' , ' '.join(__lowerCAmelCase ) )
A__ = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=__lowerCAmelCase , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=__lowerCAmelCase , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
A__ = []
A__ = []
def tee(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase="" ):
A__ = line.decode('utf-8' ).rstrip()
sink.append(__lowerCAmelCase )
if not quiet:
print(__lowerCAmelCase , __lowerCAmelCase , file=__lowerCAmelCase )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
asyncio.create_task(_read_stream(p.stdout , lambda __UpperCamelCase : tee(__lowerCAmelCase , __lowerCAmelCase , sys.stdout , label='stdout:' ) ) ),
asyncio.create_task(_read_stream(p.stderr , lambda __UpperCamelCase : tee(__lowerCAmelCase , __lowerCAmelCase , sys.stderr , label='stderr:' ) ) ),
] , timeout=__lowerCAmelCase , )
return _RunOutput(await p.wait() , __lowerCAmelCase , __lowerCAmelCase )
def A ( __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=180 , __UpperCamelCase=False , __UpperCamelCase=True ) -> _RunOutput:
A__ = asyncio.get_event_loop()
A__ = loop.run_until_complete(
_stream_subprocess(__lowerCAmelCase , env=__lowerCAmelCase , stdin=__lowerCAmelCase , timeout=__lowerCAmelCase , quiet=__lowerCAmelCase , echo=__lowerCAmelCase ) )
A__ = """ """.join(__lowerCAmelCase )
if result.returncode > 0:
A__ = """\n""".join(result.stderr )
raise RuntimeError(
f'''\'{cmd_str}\' failed with returncode {result.returncode}\n\n'''
f'''The combined stderr from workers follows:\n{stderr}''' )
return result
class __lowerCAmelCase ( UpperCamelCase_ ):
"""simple docstring"""
pass
def A ( __UpperCamelCase , __UpperCamelCase=False ) -> str:
try:
A__ = subprocess.check_output(__lowerCAmelCase , stderr=subprocess.STDOUT )
if return_stdout:
if hasattr(__lowerCAmelCase , 'decode' ):
A__ = output.decode('utf-8' )
return output
except subprocess.CalledProcessError as e:
raise SubprocessCallException(
f'''Command `{" ".join(__lowerCAmelCase )}` failed with the following error:\n\n{e.output.decode()}''' ) from e
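# --- Added example (editor's sketch, not part of the original file) ---
# The RUN_SLOW gating above follows a common pattern: read an env var, fall
# back to a default, and coerce yes/no strings to bool. `distutils.strtobool`
# is deprecated on recent Pythons, so this sketch uses a plain mapping:
import os

def parse_bool_env(key: str, default: bool = False) -> bool:
    value = os.environ.get(key)
    if value is None:
        return default
    value = value.strip().lower()
    if value in {"1", "true", "yes", "y", "on"}:
        return True
    if value in {"0", "false", "no", "n", "off"}:
        return False
    raise ValueError(f"If set, {key} must be yes or no, got {value!r}.")

RUN_SLOW = parse_bool_env("RUN_SLOW", default=False)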
| 712 |
import pytest
import datasets
# Import fixture modules as plugins
SCREAMING_SNAKE_CASE__ = ['''tests.fixtures.files''', '''tests.fixtures.hub''', '''tests.fixtures.fsspec''']
def A ( __UpperCamelCase , __UpperCamelCase ) -> Optional[int]:
# Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
for item in items:
if any(marker in item.keywords for marker in ['integration', 'unit'] ):
continue
item.add_marker(pytest.mark.unit )
def A ( __UpperCamelCase ) -> str:
config.addinivalue_line('markers' , 'torchaudio_latest: mark test to run with torchaudio>=0.12' )
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase , __UpperCamelCase ) -> Union[str, Any]:
    # test_hf_cache_home = tmp_path_factory.mktemp("cache") # TODO: why doesn't a cache dir per test function work?
A__ = tmp_path_factory.getbasetemp() / 'cache'
A__ = test_hf_cache_home / 'datasets'
A__ = test_hf_cache_home / 'metrics'
A__ = test_hf_cache_home / 'modules'
monkeypatch.setattr('datasets.config.HF_DATASETS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_METRICS_CACHE' , str(__UpperCamelCase ) )
monkeypatch.setattr('datasets.config.HF_MODULES_CACHE' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads'
monkeypatch.setattr('datasets.config.DOWNLOADED_DATASETS_PATH' , str(__UpperCamelCase ) )
A__ = test_hf_datasets_cache / 'downloads' / 'extracted'
monkeypatch.setattr('datasets.config.EXTRACTED_DATASETS_PATH' , str(__UpperCamelCase ) )
@pytest.fixture(autouse=__UpperCamelCase , scope='session' )
def A ( ) -> Union[str, Any]:
datasets.disable_progress_bar()
@pytest.fixture(autouse=__UpperCamelCase )
def A ( __UpperCamelCase ) -> int:
# don't take tests into account when counting downloads
monkeypatch.setattr('datasets.config.HF_UPDATE_DOWNLOAD_COUNTS' , __UpperCamelCase )
@pytest.fixture
def A ( __UpperCamelCase ) -> Any:
# Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
# To be removed once SQLAlchemy 2.0 supported
monkeypatch.setattr('sqlalchemy.util.deprecations.SILENCE_UBER_WARNING' , __UpperCamelCase )
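# --- Added example (editor's sketch, not part of the original file) ---
# The fixtures above rely on two pytest features: autouse (applied to every
# test without being requested) and monkeypatch (reverted after each test).
# Minimal sketch of the same cache-isolation idea using only an env var:
import pytest

@pytest.fixture(autouse=True)
def isolated_cache(tmp_path_factory, monkeypatch):
    cache_dir = tmp_path_factory.getbasetemp() / "cache"
    monkeypatch.setenv("HF_HOME", str(cache_dir))  # undone automatically per test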
| 52 | 0 |
import qiskit
def __UpperCamelCase ( A , A ):
UpperCamelCase__ = qiskit.Aer.get_backend('''aer_simulator''' )
UpperCamelCase__ = qiskit.QuantumCircuit(4 , 2 )
# encode inputs in qubits 0 and 1
if bita == 1:
qc_ha.x(0 )
if bita == 1:
qc_ha.x(1 )
qc_ha.barrier()
# use cnots to write XOR of the inputs on qubit2
qc_ha.cx(0 , 2 )
qc_ha.cx(1 , 2 )
# use ccx / toffoli gate to write AND of the inputs on qubit3
qc_ha.ccx(0 , 1 , 3 )
qc_ha.barrier()
# extract outputs
qc_ha.measure(2 , 0 ) # extract XOR value
qc_ha.measure(3 , 1 ) # extract AND value
# Execute the circuit on the qasm simulator
UpperCamelCase__ = qiskit.execute(A , A , shots=1000 )
# Return the histogram data of the results of the experiment
return job.result().get_counts(A )
if __name__ == "__main__":
__magic_name__ =half_adder(1, 1)
print(f"""Half Adder Output Qubit Counts: {counts}""")
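# --- Added example (editor's sketch, not part of the original file) ---
# The circuit above computes sum = a XOR b (qubit 2) and carry = a AND b
# (qubit 3). The same truth table, classically, for comparison:
def half_adder_classical(bit_a: int, bit_b: int) -> tuple:
    return bit_a ^ bit_b, bit_a & bit_b  # (sum, carry)

for a in (0, 1):
    for b in (0, 1):
        print(a, b, half_adder_classical(a, b))  # e.g. 1, 1 -> (0, 1)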
| 415 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ ={
'''configuration_roformer''': ['''ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RoFormerConfig''', '''RoFormerOnnxConfig'''],
'''tokenization_roformer''': ['''RoFormerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =['''RoFormerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =[
'''ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RoFormerForCausalLM''',
'''RoFormerForMaskedLM''',
'''RoFormerForMultipleChoice''',
'''RoFormerForQuestionAnswering''',
'''RoFormerForSequenceClassification''',
'''RoFormerForTokenClassification''',
'''RoFormerLayer''',
'''RoFormerModel''',
'''RoFormerPreTrainedModel''',
'''load_tf_weights_in_roformer''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =[
'''TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRoFormerForCausalLM''',
'''TFRoFormerForMaskedLM''',
'''TFRoFormerForMultipleChoice''',
'''TFRoFormerForQuestionAnswering''',
'''TFRoFormerForSequenceClassification''',
'''TFRoFormerForTokenClassification''',
'''TFRoFormerLayer''',
'''TFRoFormerModel''',
'''TFRoFormerPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ =[
'''FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FlaxRoFormerForMaskedLM''',
'''FlaxRoFormerForMultipleChoice''',
'''FlaxRoFormerForQuestionAnswering''',
'''FlaxRoFormerForSequenceClassification''',
'''FlaxRoFormerForTokenClassification''',
'''FlaxRoFormerModel''',
'''FlaxRoFormerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roformer import ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, RoFormerConfig, RoFormerOnnxConfig
from .tokenization_roformer import RoFormerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roformer_fast import RoFormerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roformer import (
ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
RoFormerForCausalLM,
RoFormerForMaskedLM,
RoFormerForMultipleChoice,
RoFormerForQuestionAnswering,
RoFormerForSequenceClassification,
RoFormerForTokenClassification,
RoFormerLayer,
RoFormerModel,
RoFormerPreTrainedModel,
load_tf_weights_in_roformer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roformer import (
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerLayer,
TFRoFormerModel,
TFRoFormerPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roformer import (
FLAX_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
FlaxRoFormerPreTrainedModel,
)
else:
import sys
__magic_name__ =_LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
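# --- Added example (editor's sketch, not part of the original file) ---
# _LazyModule above defers the heavy framework imports until an attribute is
# first accessed. The same effect can be sketched with a PEP 562 module-level
# __getattr__, using only the standard library:
import importlib
_LAZY = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module

def __getattr__(name):
    if name in _LAZY:
        return getattr(importlib.import_module(_LAZY[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")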
| 415 | 1 |
from __future__ import annotations
from statistics import mean
def A_ ( __a : list[int] , __a : list[int] , __a : int ):
"""simple docstring"""
a__ = [0] * no_of_processes
a__ = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__a ):
a__ = burst_time[i]
a__ = []
a__ = 0
a__ = 0
    # While processes remain uncompleted: every process that has already
    # arrived and still has remaining execution time is placed in
    # ready_process, and the one with the shortest remaining time
    # (target_process) is run to completion.
while completed != no_of_processes:
a__ = []
a__ = -1
for i in range(__a ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__a )
if len(__a ) > 0:
a__ = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
a__ = i
total_time += burst_time[target_process]
completed += 1
a__ = 0
a__ = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def A_ ( __a : list[int] , __a : int , __a : list[int] ):
"""simple docstring"""
a__ = [0] * no_of_processes
for i in range(__a ):
a__ = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print("""[TEST CASE 01]""")
UpperCAmelCase = 4
UpperCAmelCase = [2, 5, 3, 7]
UpperCAmelCase = [0, 0, 0, 0]
UpperCAmelCase = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
UpperCAmelCase = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print("""PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time""")
for i, process_id in enumerate(list(range(1, 5))):
print(
f"""{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"""
f"""{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"""
)
print(f"""\nAverage waiting time = {mean(waiting_time):.5f}""")
print(f"""Average turnaround time = {mean(turn_around_time):.5f}""")
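# --- Added example (editor's sketch, not part of the original file) ---
# The scheduler above rescans ready_process each round to find the shortest
# job; a min-heap keyed on remaining time makes that selection O(log n).
# Sketch of the selection step only, not a drop-in replacement:
import heapq

def pick_shortest(ready):
    """ready holds (remaining_time, pid) pairs; returns the pid to run next."""
    heapq.heapify(ready)
    return heapq.heappop(ready)[1]

print(pick_shortest([(5, 0), (2, 1), (7, 2)]))  # -> 1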
| 700 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
UpperCAmelCase = None
UpperCAmelCase = logging.get_logger(__name__)
UpperCAmelCase = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
UpperCAmelCase = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
UpperCAmelCase = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
UpperCAmelCase = """▁"""
class __snake_case ( SCREAMING_SNAKE_CASE):
'''simple docstring'''
UpperCamelCase__ : Tuple = VOCAB_FILES_NAMES
UpperCamelCase__ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
UpperCamelCase__ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCamelCase__ : List[str] = AlbertTokenizer
def __init__( self , a_=None , a_=None , a_=True , a_=True , a_=False , a_="[CLS]" , a_="[SEP]" , a_="<unk>" , a_="[SEP]" , a_="<pad>" , a_="[CLS]" , a_="[MASK]" , **a_ , ):
        # The mask token behaves like a normal word, i.e. it includes the space before it
        # and is part of the raw text, so there should be a match in a non-normalized sentence.
a__ = (
AddedToken(a_ , lstrip=a_ , rstrip=a_ , normalized=a_ )
if isinstance(a_ , a_ )
else mask_token
)
super().__init__(
a_ , tokenizer_file=a_ , do_lower_case=a_ , remove_space=a_ , keep_accents=a_ , bos_token=a_ , eos_token=a_ , unk_token=a_ , sep_token=a_ , pad_token=a_ , cls_token=a_ , mask_token=a_ , **a_ , )
a__ = do_lower_case
a__ = remove_space
a__ = keep_accents
a__ = vocab_file
a__ = False if not self.vocab_file else True
    def _a ( self , token_ids_a , token_ids_b = None ):
        # [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return cls + token_ids_a + sep
        return cls + token_ids_a + sep + token_ids_b + sep
    def _a ( self , token_ids_a , token_ids_b = None ):
        # token_type_ids: 0 for "[CLS] A [SEP]", 1 for the optional "B [SEP]".
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]
def _a ( self , a_ , a_ = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(a_ ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
a__ = os.path.join(
a_ , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a_ ):
copyfile(self.vocab_file , a_ )
return (out_vocab_file,)
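# --- Added example (editor's sketch, not part of the original file) ---
# What the two sequence helpers above produce for an ALBERT-style model. The
# ids CLS=2, SEP=3 are illustrative assumptions, not read from the vocab:
CLS, SEP = 2, 3
ids_a, ids_b = [7, 8], [9]
print([CLS] + ids_a + [SEP] + ids_b + [SEP])             # [2, 7, 8, 3, 9, 3]
print([0] * (len(ids_a) + 2) + [1] * (len(ids_b) + 1))   # [0, 0, 0, 0, 1, 1]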
| 351 | 0 |
'''simple docstring'''
lowercase : int = [
[0, 16, 13, 0, 0, 0],
[0, 0, 10, 12, 0, 0],
[0, 4, 0, 0, 14, 0],
[0, 0, 9, 0, 0, 20],
[0, 0, 0, 7, 0, 4],
[0, 0, 0, 0, 0, 0],
]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A , __A ) -> Dict:
    # Breadth-first search: return True if the sink t is reachable from the source s in the residual graph.
_snake_case = [False] * len(__A )
_snake_case = [s]
_snake_case = True
while queue:
_snake_case = queue.pop(0 )
for ind in range(len(graph[u] ) ):
if visited[ind] is False and graph[u][ind] > 0:
queue.append(__A )
_snake_case = True
_snake_case = u
return visited[t]
def SCREAMING_SNAKE_CASE__ ( __A , __A , __A ) -> Any:
_snake_case = [-1] * (len(__A ))
_snake_case = 0
_snake_case = []
_snake_case = [i[:] for i in graph] # Record original cut, copy.
while bfs(__A , __A , __A , __A ):
_snake_case = float('Inf' )
_snake_case = sink
while s != source:
            # Find the minimum residual capacity along the selected augmenting path
_snake_case = min(__A , graph[parent[s]][s] )
_snake_case = parent[s]
max_flow += path_flow
_snake_case = sink
while v != source:
_snake_case = parent[v]
graph[u][v] -= path_flow
graph[v][u] += path_flow
_snake_case = parent[v]
for i in range(len(__A ) ):
for j in range(len(graph[0] ) ):
if graph[i][j] == 0 and temp[i][j] > 0:
res.append((i, j) )
return res
if __name__ == "__main__":
print(mincut(test_graph, source=0, sink=5))
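# --- Added example (editor's sketch, not part of the original file) ---
# mincut() above is Edmonds-Karp (Ford-Fulkerson with BFS augmenting paths),
# and it mutates the graph it receives into the residual network, so pass a
# copy when the original capacities are still needed. For this test_graph the
# reported saturated edges form the minimum cut, with capacity 23 (= max flow):
from copy import deepcopy

residual = deepcopy(test_graph)  # keep test_graph intact
saturated = mincut(residual, source=0, sink=5)
print(saturated, sum(test_graph[u][v] for u, v in saturated))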
| 495 |
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class __UpperCAmelCase :
def __init__( self , lowerCAmelCase_ , lowerCAmelCase_=2 , lowerCAmelCase_=8 , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=True , lowerCAmelCase_=99 , lowerCAmelCase_=16 , lowerCAmelCase_=5 , lowerCAmelCase_=2 , lowerCAmelCase_=36 , lowerCAmelCase_="gelu" , lowerCAmelCase_=0.0 , lowerCAmelCase_=0.0 , lowerCAmelCase_=5_12 , lowerCAmelCase_=16 , lowerCAmelCase_=2 , lowerCAmelCase_=0.02 , lowerCAmelCase_=3 , lowerCAmelCase_=4 , lowerCAmelCase_=None , ):
"""simple docstring"""
_snake_case = parent
_snake_case = batch_size
_snake_case = seq_length
_snake_case = is_training
_snake_case = use_input_mask
_snake_case = use_token_type_ids
_snake_case = use_labels
_snake_case = vocab_size
_snake_case = hidden_size
_snake_case = num_hidden_layers
_snake_case = num_attention_heads
_snake_case = intermediate_size
_snake_case = hidden_act
_snake_case = hidden_dropout_prob
_snake_case = attention_probs_dropout_prob
_snake_case = max_position_embeddings
_snake_case = type_vocab_size
_snake_case = type_sequence_label_size
_snake_case = initializer_range
_snake_case = num_labels
_snake_case = num_choices
_snake_case = scope
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_snake_case = None
if self.use_input_mask:
_snake_case = random_attention_mask([self.batch_size, self.seq_length] )
_snake_case = None
if self.use_token_type_ids:
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_snake_case = None
_snake_case = None
_snake_case = None
if self.use_labels:
_snake_case = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_snake_case = ids_tensor([self.batch_size] , self.num_choices )
_snake_case = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase ( self ):
"""simple docstring"""
return MraConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase_ , initializer_range=self.initializer_range , )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.get_config()
_snake_case = 3_00
return config
def lowerCamelCase ( self ):
"""simple docstring"""
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = self.prepare_config_and_inputs()
_snake_case = True
_snake_case = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
_snake_case = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraModel(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
_snake_case = model(lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ):
"""simple docstring"""
_snake_case = True
_snake_case = MraModel(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , encoder_attention_mask=lowerCAmelCase_ , )
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , encoder_hidden_states=lowerCAmelCase_ , )
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraForMaskedLM(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = MraForQuestionAnswering(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , start_positions=lowerCAmelCase_ , end_positions=lowerCAmelCase_ , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = MraForSequenceClassification(lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_labels
_snake_case = MraForTokenClassification(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = model(lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase ( self , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ ):
"""simple docstring"""
_snake_case = self.num_choices
_snake_case = MraForMultipleChoice(config=lowerCAmelCase_ )
model.to(lowerCAmelCase_ )
model.eval()
_snake_case = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_snake_case = model(
lowerCAmelCase_ , attention_mask=lowerCAmelCase_ , token_type_ids=lowerCAmelCase_ , labels=lowerCAmelCase_ , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.prepare_config_and_inputs()
        _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case , _snake_case = config_and_inputs
_snake_case = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class __UpperCAmelCase ( _lowerCamelCase , unittest.TestCase ):
__lowercase = (
(
MraModel,
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
)
if is_torch_available()
else ()
)
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = False
__lowercase = ()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraModelTester(self )
_snake_case = ConfigTester(self , config_class=lowerCAmelCase_ , hidden_size=37 )
def lowerCamelCase ( self ):
"""simple docstring"""
self.config_tester.run_common_tests()
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
_snake_case = type
self.model_tester.create_and_check_model(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase_ )
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase_ )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_snake_case = MraModel.from_pretrained(lowerCAmelCase_ )
self.assertIsNotNone(lowerCAmelCase_ )
@unittest.skip(reason='MRA does not output attentions' )
def lowerCamelCase ( self ):
"""simple docstring"""
return
@require_torch
class __UpperCAmelCase ( unittest.TestCase ):
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraModel.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = torch.Size((1, 2_56, 7_68) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[-0.0140, 0.0830, -0.0381], [0.1546, 0.1402, 0.0220], [0.1162, 0.0851, 0.0165]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-512-4' )
_snake_case = torch.arange(2_56 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = 5_02_65
_snake_case = torch.Size((1, 2_56, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[9.2595, -3.6038, 11.8819], [9.3869, -3.2693, 11.0956], [11.8524, -3.4938, 13.1210]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
@slow
def lowerCamelCase ( self ):
"""simple docstring"""
_snake_case = MraForMaskedLM.from_pretrained('uw-madison/mra-base-4096-8-d3' )
_snake_case = torch.arange(40_96 ).unsqueeze(0 )
with torch.no_grad():
_snake_case = model(lowerCAmelCase_ )[0]
_snake_case = 5_02_65
_snake_case = torch.Size((1, 40_96, vocab_size) )
self.assertEqual(output.shape , lowerCAmelCase_ )
_snake_case = torch.tensor(
[[[5.4789, -2.3564, 7.5064], [7.9067, -1.3369, 9.9668], [9.0712, -1.8106, 7.0380]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , lowerCAmelCase_ , atol=1E-4 ) )
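# --- Added example (editor's sketch, not part of the original file) ---
# The tester above follows the usual transformers layout: a helper class
# builds (config, inputs) once and each create_and_check_* method asserts
# output shapes. The core shape check, framework-free, with a toy embedding
# standing in for the model:
import torch

def check_output_shape(model_fn, batch_size=2, seq_len=8, hidden=16):
    input_ids = torch.randint(0, 99, (batch_size, seq_len))
    assert model_fn(input_ids).shape == (batch_size, seq_len, hidden)

check_output_shape(torch.nn.Embedding(99, 16))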
| 495 | 1 |
import math
from collections.abc import Iterator
from itertools import takewhile
def _A ( lowerCAmelCase_ : int ):
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(lowerCAmelCase_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = 2
while True:
if is_prime(lowerCAmelCase_ ):
yield num
num += 1
def _A ( lowerCAmelCase_ : int = 200_0000 ):
"""simple docstring"""
    return sum(takewhile(lambda x: x < lowerCAmelCase_ , prime_generator() ) )
if __name__ == "__main__":
print(F"""{solution() = }""")
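# --- Added example (editor's sketch, not part of the original file) ---
# Why is_prime above can step by 6: every prime p > 3 has p % 6 in {1, 5},
# because the other residues mod 6 are divisible by 2 or 3. Empirical check:
primes = [p for p in range(5, 100) if all(p % d for d in range(2, p))]
assert all(p % 6 in (1, 5) for p in primes)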
| 711 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def _A ( lowerCAmelCase_ : Optional[Any] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[str] , lowerCAmelCase_ : Union[str, Any] ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})'
def _A ( lowerCAmelCase_ : List[str] , lowerCAmelCase_ : int , lowerCAmelCase_ : List[Any] , lowerCAmelCase_ : Any , lowerCAmelCase_ : List[str]=True ):
"""simple docstring"""
model.train()
lowerCAmelCase__ = model(lowerCAmelCase_ )
lowerCAmelCase__ = F.mse_loss(lowerCAmelCase_ , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : str , lowerCAmelCase_ : Tuple=False ):
"""simple docstring"""
set_seed(42 )
lowerCAmelCase__ = RegressionModel()
lowerCAmelCase__ = deepcopy(lowerCAmelCase_ )
lowerCAmelCase__ = RegressionDataset(length=80 )
lowerCAmelCase__ = DataLoader(lowerCAmelCase_ , batch_size=16 )
model.to(accelerator.device )
if sched:
lowerCAmelCase__ = AdamW(params=model.parameters() , lr=1E-3 )
lowerCAmelCase__ = AdamW(params=ddp_model.parameters() , lr=1E-3 )
lowerCAmelCase__ = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda lowerCAmelCase_ : epoch**0.65 )
lowerCAmelCase__ = LambdaLR(lowerCAmelCase_ , lr_lambda=lambda lowerCAmelCase_ : epoch**0.65 )
# Make a copy of `model`
if sched:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def _A ( lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowerCAmelCase_ )
# Use a single batch
lowerCAmelCase__ , lowerCAmelCase__ = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def _A ( lowerCAmelCase_ : str ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowerCAmelCase_ )
# Use a single batch
lowerCAmelCase__ , lowerCAmelCase__ = next(iter(lowerCAmelCase_ ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
else:
# Sync grads
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
def _A ( lowerCAmelCase_ : Tuple=False , lowerCAmelCase_ : Optional[int]=False ):
"""simple docstring"""
lowerCAmelCase__ = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
lowerCAmelCase__ , lowerCAmelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(lowerCAmelCase_ ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), F'Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})'
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), F'Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})'
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
lowerCAmelCase__ = ddp_input[torch.randperm(len(lowerCAmelCase_ ) )]
GradientState._reset_state()
def _A ( lowerCAmelCase_ : List[Any]=False , lowerCAmelCase_ : Dict=False ):
"""simple docstring"""
lowerCAmelCase__ = Accelerator(
split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = get_training_setup(lowerCAmelCase_ , lowerCAmelCase_ )
for iteration, batch in enumerate(lowerCAmelCase_ ):
lowerCAmelCase__ , lowerCAmelCase__ = batch.values()
# Gather the distributed inputs and targs for the base model
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.gather((ddp_input, ddp_target) )
lowerCAmelCase__ , lowerCAmelCase__ = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(lowerCAmelCase_ )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(lowerCAmelCase_ ):
step_model(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), F'Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]["lr"]}\nDDP opt: {ddp_opt.param_groups[0]["lr"]}\n'
lowerCAmelCase__ = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(lowerCAmelCase_ ))
if accelerator.num_processes > 1:
check_model_parameters(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = RegressionDataset(length=80 )
lowerCAmelCase__ = DataLoader(lowerCAmelCase_ , batch_size=16 )
lowerCAmelCase__ = RegressionDataset(length=96 )
lowerCAmelCase__ = DataLoader(lowerCAmelCase_ , batch_size=16 )
lowerCAmelCase__ , lowerCAmelCase__ = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if iteration < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(lowerCAmelCase_ ):
assert id(accelerator.gradient_state.active_dataloader ) == id(lowerCAmelCase_ )
if batch_num < len(lowerCAmelCase_ ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def _A ( ):
"""simple docstring"""
lowerCAmelCase__ = Accelerator()
lowerCAmelCase__ = accelerator.state
if state.local_process_index == 0:
print("**Test `accumulate` gradient accumulation with dataloader break**" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("**Test NOOP `no_sync` context manager**" )
test_noop_sync(lowerCAmelCase_ )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("**Test Distributed `no_sync` context manager**" )
test_distributed_sync(lowerCAmelCase_ )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation(lowerCAmelCase_ , lowerCAmelCase_ )
# Currently will break on torch 2.0 +, need to investigate why
if is_torch_version("<" , "2.0" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , "`split_batches=False`, `dispatch_batches=False`**" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"**Test `accumulate` gradient accumulation with optimizer and scheduler, " , F'`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**' , )
test_gradient_accumulation_with_opt_and_scheduler(lowerCAmelCase_ , lowerCAmelCase_ )
def _A ( lowerCAmelCase_ : str ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
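# --- Added example (editor's sketch, not part of the original file) ---
# The behaviour the tests above exercise, as user-facing code: inside
# accelerator.accumulate(), DDP gradient all-reduce is skipped until the
# accumulation boundary. Minimal single-process usage sketch:
import torch
import torch.nn.functional as F
from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = accelerator.prepare(torch.nn.Linear(4, 1))
x = torch.randn(8, 4, device=accelerator.device)
y = torch.randn(8, 1, device=accelerator.device)
for _ in range(2):
    with accelerator.accumulate(model):  # no grad sync until the 2nd step
        accelerator.backward(F.mse_loss(model(x), y))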
| 125 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class LiltConfig(PretrainedConfig ):
    model_type = "lilt"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        classifier_dropout=None,
        channel_shrink_ratio=4,
        max_2d_position_embeddings=1024,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.classifier_dropout = classifier_dropout
        self.channel_shrink_ratio = channel_shrink_ratio
        self.max_2d_position_embeddings = max_2d_position_embeddings
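# Editor's illustrative sketch (added): exercising the restored config class.
# `to_dict`/`from_dict` are inherited from `PretrainedConfig`.
def _example_lilt_config():
    config = LiltConfig(channel_shrink_ratio=2)
    assert config.to_dict()["channel_shrink_ratio"] == 2
    return LiltConfig.from_dict(config.to_dict())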
| 384 |
'''simple docstring'''
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACT2FN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)

# General docstring
_CONFIG_FOR_DOC = "ResNetConfig"

# Base docstring
_CHECKPOINT_FOR_DOC = "microsoft/resnet-50"
_EXPECTED_OUTPUT_SHAPE = [1, 2048, 7, 7]

# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = "microsoft/resnet-50"
_IMAGE_CLASS_EXPECTED_OUTPUT = "tiger cat"

RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    "microsoft/resnet-50",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer(nn.Module ):
    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , activation = "relu" ):
        super().__init__()
        self.convolution = nn.Conv2d(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )
        self.activation = ACT2FN[activation] if activation is not None else nn.Identity()

    def forward( self , hidden_state ):
        hidden_state = self.convolution(hidden_state )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state


class ResNetEmbeddings(nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPool2d(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels

    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                "Make sure that the channel dimension of the pixel values match with the one set in the configuration." )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding


class ResNetShortCut(nn.Module ):
    def __init__( self , in_channels , out_channels , stride = 2 ):
        super().__init__()
        self.convolution = nn.Conv2d(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNorm2d(out_channels )

    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer(nn.Module ):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class ResNetBottleNeckLayer(nn.Module ):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" , reduction = 4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACT2FN[activation]

    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state


class ResNetStage(nn.Module ):
    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == "bottleneck" else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )

    def forward( self , input ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder(nn.Module ):
    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )

    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )


class ResNetPreTrainedModel(PreTrainedModel ):
    config_class = ResNetConfig
    base_model_prefix = "resnet"
    main_input_name = "pixel_values"
    supports_gradient_checkpointing = True

    def _init_weights( self , module ):
        if isinstance(module , nn.Conv2d ):
            nn.init.kaiming_normal_(module.weight , mode="fan_out" , nonlinearity="relu" )
        elif isinstance(module , (nn.BatchNorm2d, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )

    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
RESNET_START_DOCSTRING = r"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
RESNET_INPUTS_DOCSTRING = r"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    "The bare ResNet model outputting raw features without any specific head on top." , RESNET_START_DOCSTRING , )
class ResNetModel(ResNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPool2d((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality="vision" , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    """
    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for
    ImageNet.
    """ , RESNET_START_DOCSTRING , )
class ResNetForImageClassification(ResNetPreTrainedModel ):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    """
    ResNet backbone, to be used with frameworks like DETR and MaskFormer.
    """ , RESNET_START_DOCSTRING , )
class ResNetBackbone(ResNetPreTrainedModel , BackboneMixin ):
    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()

    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
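# Editor's usage sketch (added; not part of the module): driving the checkpoint
# named in the docstring constants above through the public transformers entry
# points. The returned shape matches _EXPECTED_OUTPUT_SHAPE.
def _example_resnet_forward(image):
    from transformers import AutoImageProcessor, ResNetModel

    processor = AutoImageProcessor.from_pretrained("microsoft/resnet-50")
    model = ResNetModel.from_pretrained("microsoft/resnet-50")
    inputs = processor(images=image, return_tensors="pt")
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.last_hidden_state.shape  # torch.Size([1, 2048, 7, 7])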
| 384 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {'configuration_mmbt': ['MMBTConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mmbt'] = ['MMBTForClassification', 'MMBTModel', 'ModalEmbeddings']
if TYPE_CHECKING:
from .configuration_mmbt import MMBTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mmbt import MMBTForClassification, MMBTModel, ModalEmbeddings
else:
import sys
a_ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 484 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'bert_for_seq_generation': (
            'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {'bert_for_seq_generation': 512}


class BertGenerationTokenizer(PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    prefix_tokens = []
    model_input_names = ['input_ids', 'attention_mask']

    def __init__(self , vocab_file , bos_token="<s>" , eos_token="</s>" , unk_token="<unk>" , pad_token="<pad>" , sep_token="<::::>" , sp_model_kwargs = None , **kwargs , ):
        '''simple docstring'''
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        # Add extra_ids to the special token list
        super().__init__(
            bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , pad_token=pad_token , sep_token=sep_token , sp_model_kwargs=self.sp_model_kwargs , **kwargs , )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(vocab_file )

    @property
    def vocab_size(self ):
        '''simple docstring'''
        return self.sp_model.get_piece_size()

    def get_vocab(self ):
        '''simple docstring'''
        vocab = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
        vocab.update(self.added_tokens_encoder )
        return vocab

    def __getstate__(self ):
        '''simple docstring'''
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self , d ):
        '''simple docstring'''
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self , "sp_model_kwargs" ):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs )
        self.sp_model.Load(self.vocab_file )

    def _tokenize(self , text ):
        '''simple docstring'''
        return self.sp_model.encode(text , out_type=str )

    def _convert_token_to_id(self , token ):
        '''simple docstring'''
        return self.sp_model.piece_to_id(token )

    def _convert_id_to_token(self , index ):
        '''simple docstring'''
        token = self.sp_model.IdToPiece(index )
        return token

    def convert_tokens_to_string(self , tokens ):
        '''simple docstring'''
        current_sub_tokens = []
        out_string = ""
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens ) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token )
        out_string += self.sp_model.decode(current_sub_tokens )
        return out_string.strip()

    def save_vocabulary(self , save_directory , filename_prefix = None ):
        '''simple docstring'''
        if not os.path.isdir(save_directory ):
            logger.error(F"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        out_vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(out_vocab_file ) and os.path.isfile(self.vocab_file ):
            copyfile(self.vocab_file , out_vocab_file )
        elif not os.path.isfile(self.vocab_file ):
            with open(out_vocab_file , "wb" ) as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model )
        return (out_vocab_file,) | 484 | 1 |
from typing import List
import numpy as np
def _number_of_shards_in_gen_kwargs(gen_kwargs ):
    lists_lengths = {key: len(value ) for key, value in gen_kwargs.items() if isinstance(value , list )}
    if len(set(lists_lengths.values() ) ) > 1:
        raise RuntimeError(
            (
                "Sharding is ambiguous for this dataset: "
                + "we found several data sources lists of different lengths, and we don't know over which list we should parallelize:\n"
                + "\n".join(F'''\t- key {key} has length {length}''' for key, length in lists_lengths.items() )
                + "\nTo fix this, check the 'gen_kwargs' and make sure to use lists only for data sources, "
                + "and use tuples otherwise. In the end there should only be one single list, or several lists with the same length."
            ) )
    max_length = max(lists_lengths.values() , default=0 )
    return max(1 , max_length )
def _distribute_shards(num_shards , max_num_jobs ):
    shards_indices_per_group = []
    for group_idx in range(max_num_jobs ):
        num_shards_to_add = num_shards // max_num_jobs + (group_idx < (num_shards % max_num_jobs))
        if num_shards_to_add == 0:
            break
        start = shards_indices_per_group[-1].stop if shards_indices_per_group else 0
        shard_indices = range(start , start + num_shards_to_add )
        shards_indices_per_group.append(shard_indices )
    return shards_indices_per_group
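# Editor's illustrative sketch (added): distributing 5 shards over at most
# 2 jobs — earlier groups absorb the remainder, so ranges stay contiguous.
def _example_distribute_shards():
    groups = _distribute_shards(num_shards=5, max_num_jobs=2)
    assert [list(g) for g in groups] == [[0, 1, 2], [3, 4]]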
def _split_gen_kwargs(gen_kwargs , max_num_jobs ):
    num_shards = _number_of_shards_in_gen_kwargs(gen_kwargs )
    if num_shards == 1:
        return [dict(gen_kwargs )]
    else:
        shard_indices_per_group = _distribute_shards(num_shards=num_shards , max_num_jobs=max_num_jobs )
        return [
            {
                key: [value[shard_idx] for shard_idx in shard_indices_per_group[group_idx]]
                if isinstance(value , list )
                else value
                for key, value in gen_kwargs.items()
            }
            for group_idx in range(len(shard_indices_per_group ) )
        ]
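# Editor's illustrative sketch (added): only list-valued gen_kwargs are split;
# scalar values are copied into every job's kwargs.
def _example_split_gen_kwargs():
    splits = _split_gen_kwargs({"files": ["a", "b", "c"], "tag": "train"}, max_num_jobs=2)
    assert splits == [{"files": ["a", "b"], "tag": "train"}, {"files": ["c"], "tag": "train"}]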
def merge_gen_kwargs(gen_kwargs_list ):
    return {
        key: [value for gen_kwargs in gen_kwargs_list for value in gen_kwargs[key]]
        if isinstance(gen_kwargs_list[0][key] , list )
        else gen_kwargs_list[0][key]
        for key in gen_kwargs_list[0]
    }
def _shuffle_gen_kwargs(rng , gen_kwargs ):
    list_sizes = {len(value ) for value in gen_kwargs.values() if isinstance(value , list )}
    indices_per_size = {}
    for size in list_sizes:
        indices_per_size[size] = list(range(size ) )
        rng.shuffle(indices_per_size[size] )
    # Now let's copy the gen_kwargs and shuffle the lists based on their sizes
    shuffled_kwargs = dict(gen_kwargs )
    for key, value in shuffled_kwargs.items():
        if isinstance(value , list ):
            shuffled_kwargs[key] = [value[i] for i in indices_per_size[len(value )]]
    return shuffled_kwargs | 327 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import BeitConfig, BeitForImageClassification, BeitForMaskedImageModeling, BeitImageProcessor
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ : Union[str, Any] = logging.get_logger(__name__)
def create_rename_keys( config , has_lm_head=False , is_semantic=False ):
    '''simple docstring'''
    prefix = "backbone." if is_semantic else ""
    rename_keys = []
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((f'''{prefix}blocks.{i}.norm1.weight''', f'''beit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm1.bias''', f'''beit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.weight''', f'''beit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append(
(f'''{prefix}blocks.{i}.attn.proj.bias''', f'''beit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.weight''', f'''beit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.norm2.bias''', f'''beit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.weight''', f'''beit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc1.bias''', f'''beit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.weight''', f'''beit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((f'''{prefix}blocks.{i}.mlp.fc2.bias''', f'''beit.encoder.layer.{i}.output.dense.bias''') )
# projection layer + position embeddings
rename_keys.extend(
[
(f'''{prefix}cls_token''', "beit.embeddings.cls_token"),
(f'''{prefix}patch_embed.proj.weight''', "beit.embeddings.patch_embeddings.projection.weight"),
(f'''{prefix}patch_embed.proj.bias''', "beit.embeddings.patch_embeddings.projection.bias"),
(f'''{prefix}pos_embed''', "beit.embeddings.position_embeddings"),
] )
if has_lm_head:
# mask token + layernorm
rename_keys.extend(
[
("mask_token", "beit.embeddings.mask_token"),
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
] )
else:
# layernorm + classification head
rename_keys.extend(
[
("fc_norm.weight", "beit.pooler.layernorm.weight"),
("fc_norm.bias", "beit.pooler.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
return rename_keys
def read_in_q_k_v( state_dict , config , has_lm_head=False , is_semantic=False ):
    '''simple docstring'''
    for i in range(config.num_hidden_layers ):
        prefix = "backbone." if is_semantic else ""
        # queries, keys and values
        in_proj_weight = state_dict.pop(f'''{prefix}blocks.{i}.attn.qkv.weight''' )
        q_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.q_bias''' )
        v_bias = state_dict.pop(f'''{prefix}blocks.{i}.attn.v_bias''' )
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.weight'''] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.query.bias'''] = q_bias
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.key.weight'''] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.weight'''] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f'''beit.encoder.layer.{i}.attention.attention.value.bias'''] = v_bias
        # gamma_1 and gamma_2
        # we call them lambda because otherwise they are renamed when using .from_pretrained
        gamma_1 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_1''' )
        gamma_2 = state_dict.pop(f'''{prefix}blocks.{i}.gamma_2''' )
        state_dict[f'''beit.encoder.layer.{i}.lambda_1'''] = gamma_1
        state_dict[f'''beit.encoder.layer.{i}.lambda_2'''] = gamma_2


def rename_key( dct , old , new ):
    '''simple docstring'''
    val = dct.pop(old )
    dct[new] = val


def prepare_img( ):
    '''simple docstring'''
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_dit_checkpoint( checkpoint_url , pytorch_dump_folder_path , push_to_hub=False ):
    '''simple docstring'''
    has_lm_head = False if "rvlcdip" in checkpoint_url else True
    config = BeitConfig(use_absolute_position_embeddings=True , use_mask_token=has_lm_head )
    # size of the architecture
    if "large" in checkpoint_url or "dit-l" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    # labels
    if "rvlcdip" in checkpoint_url:
        config.num_labels = 16
        repo_id = "huggingface/label-files"
        filename = "rvlcdip-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id , filename , repo_type="dataset" ) , "r" ) )
        id2label = {int(k ): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    # load state_dict of original model, remove and rename some keys
    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url , map_location="cpu" )["model"]
    rename_keys = create_rename_keys(config , has_lm_head=has_lm_head )
    for src, dest in rename_keys:
        rename_key(state_dict , src , dest )
    read_in_q_k_v(state_dict , config , has_lm_head=has_lm_head )
    # load HuggingFace model
    model = BeitForMaskedImageModeling(config ) if has_lm_head else BeitForImageClassification(config )
    model.eval()
    model.load_state_dict(state_dict )
    # Check outputs on an image
    image_processor = BeitImageProcessor(
        size=config.image_size , resample=PILImageResampling.BILINEAR , do_center_crop=False )
    image = prepare_img()
    encoding = image_processor(images=image , return_tensors="pt" )
    pixel_values = encoding["pixel_values"]
    outputs = model(pixel_values )
    logits = outputs.logits
    # verify logits
    expected_shape = [1, 16] if "rvlcdip" in checkpoint_url else [1, 196, 8192]
    assert logits.shape == torch.Size(expected_shape ), "Shape of logits not as expected"
    Path(pytorch_dump_folder_path ).mkdir(exist_ok=True )
    print(f'''Saving model to {pytorch_dump_folder_path}''' )
    model.save_pretrained(pytorch_dump_folder_path )
    print(f'''Saving image processor to {pytorch_dump_folder_path}''' )
    image_processor.save_pretrained(pytorch_dump_folder_path )
    if push_to_hub:
        if has_lm_head:
            model_name = "dit-base" if "base" in checkpoint_url else "dit-large"
        else:
            model_name = "dit-base-finetuned-rvlcdip" if "dit-b" in checkpoint_url else "dit-large-finetuned-rvlcdip"
        image_processor.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add image processor" , use_temp_dir=True , )
        model.push_to_hub(
            repo_path_or_name=Path(pytorch_dump_folder_path , model_name ) , organization="nielsr" , commit_message="Add model" , use_temp_dir=True , )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
)
    args = parser.parse_args()
convert_dit_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
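# Editor's note (added): example invocation of the conversion script above. The
# file name is hypothetical; the flags and default URL come from the argparse
# setup.
#
#   python convert_dit_checkpoint.py \
#       --checkpoint_url https://layoutlm.blob.core.windows.net/dit/dit-pts/dit-base-224-p16-500k-62d53a.pth \
#       --pytorch_dump_folder_path ./dit-base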
| 672 | 0 |
"""simple docstring"""
import contextlib
import copy
import random
from typing import Any, Dict, Iterable, Optional, Union
import numpy as np
import torch
from .utils import deprecate, is_transformers_available
if is_transformers_available():
import transformers
def __a ( _lowercase ):
"""simple docstring"""
random.seed(__lowerCAmelCase )
np.random.seed(__lowerCAmelCase )
torch.manual_seed(__lowerCAmelCase )
torch.cuda.manual_seed_all(__lowerCAmelCase )
# ^^ safe to call this function even if cuda is not available
class __SCREAMING_SNAKE_CASE :
def __init__( self :List[Any] ,__UpperCAmelCase :Iterable[torch.nn.Parameter] ,__UpperCAmelCase :float = 0.9_999 ,__UpperCAmelCase :float = 0.0 ,__UpperCAmelCase :int = 0 ,__UpperCAmelCase :bool = False ,__UpperCAmelCase :Union[float, int] = 1.0 ,__UpperCAmelCase :Union[float, int] = 2 / 3 ,__UpperCAmelCase :Optional[Any] = None ,__UpperCAmelCase :Dict[str, Any] = None ,**__UpperCAmelCase :Optional[int] ,) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowerCamelCase__ ,torch.nn.Module ):
lowerCamelCase__ : List[Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage`''' ,'''1.0.0''' ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ,)
lowerCamelCase__ : List[str] = parameters.parameters()
# set use_ema_warmup to True if a torch.nn.Module is passed for backwards compatibility
lowerCamelCase__ : Optional[int] = True
if kwargs.get('''max_value''' ,lowerCamelCase__ ) is not None:
lowerCamelCase__ : Tuple = "The `max_value` argument is deprecated. Please use `decay` instead."
deprecate('''max_value''' ,'''1.0.0''' ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ )
lowerCamelCase__ : str = kwargs["max_value"]
if kwargs.get('''min_value''' ,lowerCamelCase__ ) is not None:
lowerCamelCase__ : Optional[int] = "The `min_value` argument is deprecated. Please use `min_decay` instead."
deprecate('''min_value''' ,'''1.0.0''' ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ )
lowerCamelCase__ : Tuple = kwargs["min_value"]
lowerCamelCase__ : Optional[Any] = list(lowerCamelCase__ )
lowerCamelCase__ : Dict = [p.clone().detach() for p in parameters]
if kwargs.get('''device''' ,lowerCamelCase__ ) is not None:
lowerCamelCase__ : Any = "The `device` argument is deprecated. Please use `to` instead."
deprecate('''device''' ,'''1.0.0''' ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ )
self.to(device=kwargs['''device'''] )
lowerCamelCase__ : Union[str, Any] = None
lowerCamelCase__ : int = decay
lowerCamelCase__ : Any = min_decay
lowerCamelCase__ : Optional[int] = update_after_step
lowerCamelCase__ : str = use_ema_warmup
lowerCamelCase__ : Union[str, Any] = inv_gamma
lowerCamelCase__ : Union[str, Any] = power
lowerCamelCase__ : List[str] = 0
lowerCamelCase__ : List[str] = None # set in `step()`
lowerCamelCase__ : Optional[int] = model_cls
lowerCamelCase__ : Union[str, Any] = model_config
@classmethod
def lowercase_ ( cls :int ,__UpperCAmelCase :Tuple ,__UpperCAmelCase :int ) -> "EMAModel":
"""simple docstring"""
lowerCamelCase__ : Optional[int] = model_cls.load_config(lowerCamelCase__ ,return_unused_kwargs=lowerCamelCase__ )
lowerCamelCase__ : Union[str, Any] = model_cls.from_pretrained(lowerCamelCase__ )
lowerCamelCase__ : List[str] = cls(model.parameters() ,model_cls=lowerCamelCase__ ,model_config=model.config )
ema_model.load_state_dict(lowerCamelCase__ )
return ema_model
def lowercase_ ( self :Optional[int] ,__UpperCAmelCase :int ) -> Dict:
"""simple docstring"""
if self.model_cls is None:
raise ValueError('''`save_pretrained` can only be used if `model_cls` was defined at __init__.''' )
if self.model_config is None:
raise ValueError('''`save_pretrained` can only be used if `model_config` was defined at __init__.''' )
lowerCamelCase__ : int = self.model_cls.from_config(self.model_config )
lowerCamelCase__ : Union[str, Any] = self.state_dict()
state_dict.pop('''shadow_params''' ,lowerCamelCase__ )
model.register_to_config(**lowerCamelCase__ )
self.copy_to(model.parameters() )
model.save_pretrained(lowerCamelCase__ )
def lowercase_ ( self :List[str] ,__UpperCAmelCase :int ) -> float:
"""simple docstring"""
lowerCamelCase__ : int = max(0 ,optimization_step - self.update_after_step - 1 )
if step <= 0:
return 0.0
if self.use_ema_warmup:
lowerCamelCase__ : int = 1 - (1 + step / self.inv_gamma) ** -self.power
else:
lowerCamelCase__ : Any = (1 + step) / (10 + step)
lowerCamelCase__ : int = min(lowerCamelCase__ ,self.decay )
# make sure decay is not smaller than min_decay
lowerCamelCase__ : Union[str, Any] = max(lowerCamelCase__ ,self.min_decay )
return cur_decay_value
@torch.no_grad()
def lowercase_ ( self :str ,__UpperCAmelCase :Iterable[torch.nn.Parameter] ) -> Dict:
"""simple docstring"""
if isinstance(lowerCamelCase__ ,torch.nn.Module ):
lowerCamelCase__ : Union[str, Any] = (
"Passing a `torch.nn.Module` to `ExponentialMovingAverage.step` is deprecated. "
"Please pass the parameters of the module instead."
)
deprecate(
'''passing a `torch.nn.Module` to `ExponentialMovingAverage.step`''' ,'''1.0.0''' ,lowerCamelCase__ ,standard_warn=lowerCamelCase__ ,)
lowerCamelCase__ : Any = parameters.parameters()
lowerCamelCase__ : Dict = list(lowerCamelCase__ )
self.optimization_step += 1
# Compute the decay factor for the exponential moving average.
lowerCamelCase__ : Tuple = self.get_decay(self.optimization_step )
lowerCamelCase__ : Any = decay
lowerCamelCase__ : Optional[Any] = 1 - decay
lowerCamelCase__ : Union[str, Any] = contextlib.nullcontext
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
import deepspeed
for s_param, param in zip(self.shadow_params ,lowerCamelCase__ ):
if is_transformers_available() and transformers.deepspeed.is_deepspeed_zeroa_enabled():
lowerCamelCase__ : str = deepspeed.zero.GatheredParameters(lowerCamelCase__ ,modifier_rank=lowerCamelCase__ )
with context_manager():
if param.requires_grad:
s_param.sub_(one_minus_decay * (s_param - param) )
else:
s_param.copy_(lowerCamelCase__ )
def lowercase_ ( self :Optional[int] ,__UpperCAmelCase :Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
lowerCamelCase__ : List[str] = list(lowerCamelCase__ )
for s_param, param in zip(self.shadow_params ,lowerCamelCase__ ):
param.data.copy_(s_param.to(param.device ).data )
def lowercase_ ( self :int ,__UpperCAmelCase :Dict=None ,__UpperCAmelCase :Optional[int]=None ) -> None:
"""simple docstring"""
lowerCamelCase__ : str = [
p.to(device=lowerCamelCase__ ,dtype=lowerCamelCase__ ) if p.is_floating_point() else p.to(device=lowerCamelCase__ )
for p in self.shadow_params
]
def lowercase_ ( self :List[Any] ) -> dict:
"""simple docstring"""
return {
"decay": self.decay,
"min_decay": self.min_decay,
"optimization_step": self.optimization_step,
"update_after_step": self.update_after_step,
"use_ema_warmup": self.use_ema_warmup,
"inv_gamma": self.inv_gamma,
"power": self.power,
"shadow_params": self.shadow_params,
}
def lowercase_ ( self :Optional[int] ,__UpperCAmelCase :Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
lowerCamelCase__ : Tuple = [param.detach().cpu().clone() for param in parameters]
def lowercase_ ( self :List[str] ,__UpperCAmelCase :Iterable[torch.nn.Parameter] ) -> None:
"""simple docstring"""
if self.temp_stored_params is None:
raise RuntimeError('''This ExponentialMovingAverage has no `store()`ed weights ''' '''to `restore()`''' )
for c_param, param in zip(self.temp_stored_params ,lowerCamelCase__ ):
param.data.copy_(c_param.data )
# Better memory-wise.
lowerCamelCase__ : int = None
def lowercase_ ( self :Union[str, Any] ,__UpperCAmelCase :dict ) -> None:
"""simple docstring"""
lowerCamelCase__ : Optional[Any] = copy.deepcopy(lowerCamelCase__ )
lowerCamelCase__ : List[str] = state_dict.get('''decay''' ,self.decay )
if self.decay < 0.0 or self.decay > 1.0:
raise ValueError('''Decay must be between 0 and 1''' )
lowerCamelCase__ : Union[str, Any] = state_dict.get('''min_decay''' ,self.min_decay )
if not isinstance(self.min_decay ,lowerCamelCase__ ):
raise ValueError('''Invalid min_decay''' )
lowerCamelCase__ : List[str] = state_dict.get('''optimization_step''' ,self.optimization_step )
if not isinstance(self.optimization_step ,lowerCamelCase__ ):
raise ValueError('''Invalid optimization_step''' )
lowerCamelCase__ : List[Any] = state_dict.get('''update_after_step''' ,self.update_after_step )
if not isinstance(self.update_after_step ,lowerCamelCase__ ):
raise ValueError('''Invalid update_after_step''' )
lowerCamelCase__ : str = state_dict.get('''use_ema_warmup''' ,self.use_ema_warmup )
if not isinstance(self.use_ema_warmup ,lowerCamelCase__ ):
raise ValueError('''Invalid use_ema_warmup''' )
lowerCamelCase__ : int = state_dict.get('''inv_gamma''' ,self.inv_gamma )
if not isinstance(self.inv_gamma ,(float, int) ):
raise ValueError('''Invalid inv_gamma''' )
lowerCamelCase__ : Any = state_dict.get('''power''' ,self.power )
if not isinstance(self.power ,(float, int) ):
raise ValueError('''Invalid power''' )
lowerCamelCase__ : List[str] = state_dict.get('''shadow_params''' ,lowerCamelCase__ )
if shadow_params is not None:
lowerCamelCase__ : Optional[Any] = shadow_params
if not isinstance(self.shadow_params ,lowerCamelCase__ ):
raise ValueError('''shadow_params must be a list''' )
if not all(isinstance(lowerCamelCase__ ,torch.Tensor ) for p in self.shadow_params ):
raise ValueError('''shadow_params must all be Tensors''' )
| 718 | """simple docstring"""
def remove_duplicates( key ):
    """simple docstring"""
    key_no_dups = ""
    for ch in key:
        if ch == " " or ch not in key_no_dups and ch.isalpha():
            key_no_dups += ch
    return key_no_dups


def create_cipher_map( key ):
    """simple docstring"""
    alphabet = [chr(i + 65 ) for i in range(26 )]
    # Remove duplicate characters from key
    key = remove_duplicates(key.upper() )
    offset = len(key )
    # First fill cipher with key characters
    cipher_alphabet = {alphabet[i]: char for i, char in enumerate(key )}
    # Then map remaining characters in alphabet to
    # the alphabet from the beginning
    for i in range(len(key ) , 26 ):
        char = alphabet[i - offset]
        # Ensure we are not mapping letters to letters previously mapped
        while char in key:
            offset -= 1
            char = alphabet[i - offset]
        cipher_alphabet[alphabet[i]] = char
    return cipher_alphabet


def encipher( message , cipher_map ):
    """simple docstring"""
    return "".join(cipher_map.get(ch , ch ) for ch in message.upper() )


def decipher( message , cipher_map ):
    """simple docstring"""
    rev_cipher_map = {v: k for k, v in cipher_map.items()}
    return "".join(rev_cipher_map.get(ch , ch ) for ch in message.upper() )
def main( ):
    """simple docstring"""
    message = input("Enter message to encode or decode: " ).strip()
    key = input("Enter keyword: " ).strip()
    option = input("Encipher or decipher? E/D:" ).strip()[0].lower()
    try:
        func = {"e": encipher, "d": decipher}[option]
    except KeyError:
        raise KeyError("invalid input option" )
    cipher_map = create_cipher_map(key )
    print(func(message , cipher_map ) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
| 121 | 0 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase ):
    def __init__( self , parent , batch_size=13 , num_channels=3 , image_size=224 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict( self ):
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp( self ):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self )

    @property
    def image_processor_dict( self ):
        return self.image_proc_tester.prepare_image_processor_dict()
def a__ ( self :Tuple ):
snake_case_ : str = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase__ ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase__ ,"""size""" ) )
def a__ ( self :str ):
pass
def a__ ( self :Dict ):
snake_case_ : int = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case_ : Optional[int] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,Image.Image )
# Test not batched input
snake_case_ : List[str] = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : List[Any] = image_processor(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
def a__ ( self :Any ):
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case_ : Optional[int] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=lowerCamelCase__ ,numpify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,np.ndarray )
# Test not batched input
snake_case_ : Union[str, Any] = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : Union[str, Any] = image_processor(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
def a__ ( self :Optional[Any] ):
snake_case_ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case_ : List[Any] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=lowerCamelCase__ ,torchify=lowerCamelCase__ )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase__ ,torch.Tensor )
# Test not batched input
snake_case_ : Any = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
snake_case_ : List[str] = image_processor(lowerCamelCase__ ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,) | 334 |
'''simple docstring'''
import tempfile
import unittest
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
from transformers.testing_utils import (
is_torch_available,
require_optimum,
require_torch,
slow,
)
if is_torch_available():
import torch
@require_torch
@require_optimum
@slow
class A ( unittest.TestCase ):
def A__ ( self ) -> List[str]:
'''simple docstring'''
lowercase__ = """hf-internal-testing/tiny-random-t5"""
lowercase__ = AutoTokenizer.from_pretrained(lowerCamelCase__ )
        lowercase__ = AutoModelForSeq2SeqLM.from_pretrained(lowerCamelCase__ )
lowercase__ = tokenizer("""This is me""" , return_tensors="""pt""" )
lowercase__ = model.to_bettertransformer()
self.assertTrue(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
lowercase__ = model.generate(**lowerCamelCase__ )
lowercase__ = model.reverse_bettertransformer()
self.assertFalse(any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model.named_modules() ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(lowerCamelCase__ )
            lowercase__ = AutoModelForSeq2SeqLM.from_pretrained(lowerCamelCase__ )
self.assertFalse(
any("""BetterTransformer""" in mod.__class__.__name__ for _, mod in model_reloaded.named_modules() ) )
lowercase__ = model_reloaded.generate(**lowerCamelCase__ )
self.assertTrue(torch.allclose(lowerCamelCase__ , lowerCamelCase__ ) )
def A__ ( self ) -> Optional[Any]:
'''simple docstring'''
lowercase__ = """hf-internal-testing/tiny-random-t5"""
        lowercase__ = AutoModelForSeq2SeqLM.from_pretrained(lowerCamelCase__ )
lowercase__ = model.to_bettertransformer()
with tempfile.TemporaryDirectory() as tmpdirname:
with self.assertRaises(lowerCamelCase__ ):
model.save_pretrained(lowerCamelCase__ )
lowercase__ = model.reverse_bettertransformer()
model.save_pretrained(lowerCamelCase__ )
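# Editor's illustrative sketch (added): the constraint the second test asserts —
# a BetterTransformer-converted model must be reversed before `save_pretrained`.
def _example_safe_save(model, save_path):
    model = model.to_bettertransformer()
    # ... run inference on the fast path ...
    model = model.reverse_bettertransformer()
    model.save_pretrained(save_path)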
| 325 | 0 |
import unittest
from queue import Empty
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, TextStreamer, is_torch_available
from transformers.testing_utils import CaptureStdout, require_torch, torch_device
from ..test_modeling_common import ids_tensor
if is_torch_available():
import torch
from transformers import AutoModelForCausalLM
@require_torch
class _UpperCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __UpperCAmelCase ( self : Union[str, Any]) -> Tuple:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(A_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(A_)
_UpperCamelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(A_)
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(A_ , A_)
def __UpperCAmelCase ( self : int) -> Tuple:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(A_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(A_)
_UpperCamelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_)
_UpperCamelCase = tokenizer.decode(greedy_ids[0])
_UpperCamelCase = TextIteratorStreamer(A_)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=A_)
thread.start()
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
self.assertEqual(A_ , A_)
def __UpperCAmelCase ( self : List[str]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(A_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(A_)
_UpperCamelCase = model.generate(A_ , max_new_tokens=10 , do_sample=A_)
_UpperCamelCase = greedy_ids[:, input_ids.shape[1] :]
_UpperCamelCase = tokenizer.decode(new_greedy_ids[0])
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(A_ , skip_prompt=A_)
model.generate(A_ , max_new_tokens=10 , do_sample=A_ , streamer=A_)
# The greedy text should be printed to stdout, except for the final "\n" in the streamer
_UpperCamelCase = cs.out[:-1]
self.assertEqual(A_ , A_)
def __UpperCAmelCase ( self : int) -> int:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("distilgpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("distilgpt2").to(A_)
_UpperCamelCase = -1
_UpperCamelCase = torch.ones((1, 5) , device=A_).long() * model.config.bos_token_id
with CaptureStdout() as cs:
_UpperCamelCase = TextStreamer(A_ , skip_special_tokens=A_)
model.generate(A_ , max_new_tokens=1 , do_sample=A_ , streamer=A_)
# The prompt contains a special token, so the streamer should not print it. As such, the output text, when
# re-tokenized, must only contain one token
_UpperCamelCase = cs.out[:-1] # Remove the final "\n"
_UpperCamelCase = tokenizer(A_ , return_tensors="pt")
self.assertEqual(streamer_text_tokenized.input_ids.shape , (1, 1))
def __UpperCAmelCase ( self : List[Any]) -> Union[str, Any]:
"""simple docstring"""
_UpperCamelCase = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
_UpperCamelCase = AutoModelForCausalLM.from_pretrained("hf-internal-testing/tiny-random-gpt2").to(A_)
_UpperCamelCase = -1
_UpperCamelCase = ids_tensor((1, 5) , vocab_size=model.config.vocab_size).to(A_)
_UpperCamelCase = TextIteratorStreamer(A_ , timeout=0.0_01)
_UpperCamelCase = {"input_ids": input_ids, "max_new_tokens": 10, "do_sample": False, "streamer": streamer}
_UpperCamelCase = Thread(target=model.generate , kwargs=A_)
thread.start()
# The streamer will timeout after 0.001 seconds, so an exception will be raised
with self.assertRaises(A_):
_UpperCamelCase = ""
for new_text in streamer:
streamer_text += new_text
| 716 | import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class _UpperCAmelCase ( TestCase ):
    '''simple docstring'''

    def _no_encoding_on_file_open(self , file_path: str ):
        '''simple docstring'''
        with open(file_path , encoding="utf-8") as input_file:
            regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self , file_path: str ):
        '''simple docstring'''
        with open(file_path , encoding="utf-8") as input_file:
            regexp = re.compile(R"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self ):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f'open(...) must use utf-8 encoding in {dataset}')

    def test_no_print_statements(self ):
        '''simple docstring'''
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f'print statement found in {dataset}. Use datasets.logger/logging instead.')
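# Editor's illustrative sketch (added): strings the encoding regex above flags
# versus allows; the assertions were checked by hand against the pattern.
def _example_encoding_regex():
    regexp = re.compile(R"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
    assert regexp.search(" open(path)") is not None
    assert regexp.search(" open(path, encoding='utf-8')") is None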
| 82 | 0 |
"""simple docstring"""
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarely `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"stable diffusion controlnet",
"0.22.0",
"Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.",
standard_warn=False,
stacklevel=3,
)
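# Editor's note (added): importing `FlaxStableDiffusionControlNetPipeline` from
# this module still works but only triggers the deprecation warning above; per
# that message, new code should use:
#
#     from diffusers import FlaxStableDiffusionControlNetPipeline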
| 608 |
def check_bouncy(n: int ) -> bool:
    if not isinstance(n , int ):
        raise ValueError("check_bouncy() accepts only integer arguments" )
    str_n = str(n )
    sorted_str_n = "".join(sorted(str_n ) )
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n


def solution(percent: float = 99 ) -> int:
    if not 0 < percent < 100:
        raise ValueError("solution() only accepts values from 0 to 100" )
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num ):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(9_9)}""")
| 233 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase = logging.get_logger(__name__)
__lowerCamelCase = {
'kssteven/ibert-roberta-base': 'https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json',
'kssteven/ibert-roberta-large': 'https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json',
'kssteven/ibert-roberta-large-mnli': (
'https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json'
),
}
class __A ( SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase__ = "ibert"
def __init__( self : Union[str, Any] , __snake_case : Tuple=3_0_5_2_2 , __snake_case : Dict=7_6_8 , __snake_case : Dict=1_2 , __snake_case : Dict=1_2 , __snake_case : Union[str, Any]=3_0_7_2 , __snake_case : Optional[Any]="gelu" , __snake_case : Optional[Any]=0.1 , __snake_case : int=0.1 , __snake_case : Optional[Any]=5_1_2 , __snake_case : Optional[Any]=2 , __snake_case : Tuple=0.02 , __snake_case : str=1E-12 , __snake_case : Union[str, Any]=1 , __snake_case : Optional[int]=0 , __snake_case : List[Any]=2 , __snake_case : Tuple="absolute" , __snake_case : Any=False , __snake_case : List[Any]="none" , **__snake_case : Dict , ) -> Optional[Any]:
super().__init__(pad_token_id=lowercase_ , bos_token_id=lowercase_ , eos_token_id=lowercase_ , **lowercase_ )
__magic_name__: int = vocab_size
__magic_name__: str = hidden_size
__magic_name__: List[str] = num_hidden_layers
__magic_name__: Tuple = num_attention_heads
__magic_name__: Any = hidden_act
__magic_name__: str = intermediate_size
__magic_name__: Optional[int] = hidden_dropout_prob
__magic_name__: List[Any] = attention_probs_dropout_prob
__magic_name__: str = max_position_embeddings
__magic_name__: Any = type_vocab_size
__magic_name__: Any = initializer_range
__magic_name__: int = layer_norm_eps
__magic_name__: Optional[int] = position_embedding_type
__magic_name__: Union[str, Any] = quant_mode
__magic_name__: Optional[int] = force_dequant
class __A ( SCREAMING_SNAKE_CASE_ ):
@property
def lowerCamelCase__ ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
__magic_name__: Optional[int] = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__magic_name__: Any = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
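# --- Editor's note: a hedged usage sketch for the two classes above; the de-obfuscated
# upstream names are IBertConfig and IBertOnnxConfig. It just inspects the dynamic-axis
# mapping the ONNX export would use for the default task.
from transformers import IBertConfig
from transformers.models.ibert.configuration_ibert import IBertOnnxConfig

config = IBertConfig()                      # quant_mode defaults off
onnx_config = IBertOnnxConfig(config, task="default")
print(onnx_config.inputs)                   # input_ids / attention_mask with batch + sequence axes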
| 716 |
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import evaluate
import numpy as np
import torch
from datasets import load_dataset
from PIL import Image
from torchvision.transforms import (
CenterCrop,
Compose,
Normalize,
RandomHorizontalFlip,
RandomResizedCrop,
Resize,
ToTensor,
)
import transformers
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
AutoConfig,
AutoImageProcessor,
AutoModelForImageClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCamelCase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version('4.31.0')
require_version('datasets>=1.8.0', 'To fix: pip install -r examples/pytorch/image-classification/requirements.txt')
__lowerCamelCase = list(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING.keys())
__lowerCamelCase = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
def a ( __UpperCAmelCase : str ) -> Optional[int]:
with open(__UpperCAmelCase , """rb""" ) as f:
__magic_name__: Union[str, Any] = Image.open(__UpperCAmelCase )
return im.convert("""RGB""" )
@dataclass
class __A :
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
"help": "Name of a dataset from the hub (could be your own, possibly private dataset hosted on the hub)."
} ,)
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
UpperCAmelCase__ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "A folder containing the training data."} )
UpperCAmelCase__ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "A folder containing the validation data."} )
UpperCAmelCase__ = field(
default=0.1_5 ,metadata={"help": "Percent to split off of train for validation."} )
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} ,)
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} ,)
def lowerCamelCase__ ( self : List[Any] ) -> List[str]:
if self.dataset_name is None and (self.train_dir is None and self.validation_dir is None):
raise ValueError(
"""You must specify either a dataset name from the hub or a train and/or validation directory.""" )
@dataclass
class __A :
UpperCAmelCase__ = field(
default="google/vit-base-patch16-224-in21k" ,metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} ,)
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(SCREAMING_SNAKE_CASE_ )} ,)
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "Pretrained config name or path if not the same as model_name"} )
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
UpperCAmelCase__ = field(
default="main" ,metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} ,)
UpperCAmelCase__ = field(default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "Name or path of preprocessor config."} )
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} ,)
UpperCAmelCase__ = field(
default=SCREAMING_SNAKE_CASE_ ,metadata={"help": "Will enable to load a pretrained model whose head dimensions are different."} ,)
def a ( __UpperCAmelCase : Tuple ) -> List[Any]:
__magic_name__: Dict = torch.stack([example["""pixel_values"""] for example in examples] )
__magic_name__: str = torch.tensor([example["""labels"""] for example in examples] )
return {"pixel_values": pixel_values, "labels": labels}
def a ( ) -> int:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
__magic_name__: Dict = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__magic_name__, __magic_name__, __magic_name__: Optional[int] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__magic_name__, __magic_name__, __magic_name__: int = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("""run_image_classification""" , __UpperCAmelCase , __UpperCAmelCase )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
__magic_name__: Optional[Any] = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'
+ f'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(f'Training/evaluation parameters {training_args}' )
# Detecting last checkpoint.
__magic_name__: Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__magic_name__: int = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'Output directory ({training_args.output_dir}) already exists and is not empty. '
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Initialize our dataset and prepare it for the 'image-classification' task.
if data_args.dataset_name is not None:
__magic_name__: Tuple = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir , task="""image-classification""" , use_auth_token=True if model_args.use_auth_token else None , )
else:
__magic_name__: Optional[Any] = {}
if data_args.train_dir is not None:
__magic_name__: List[Any] = os.path.join(data_args.train_dir , """**""" )
if data_args.validation_dir is not None:
__magic_name__: str = os.path.join(data_args.validation_dir , """**""" )
__magic_name__: int = load_dataset(
"""imagefolder""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir , task="""image-classification""" , )
# If we don't have a validation split, split off a percentage of train as validation.
__magic_name__: Optional[int] = None if """validation""" in dataset.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , __UpperCAmelCase ) and data_args.train_val_split > 0.0:
__magic_name__: int = dataset["""train"""].train_test_split(data_args.train_val_split )
__magic_name__: Optional[int] = split["""train"""]
__magic_name__: Tuple = split["""test"""]
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
__magic_name__: int = dataset["""train"""].features["""labels"""].names
__magic_name__, __magic_name__: List[Any] = {}, {}
for i, label in enumerate(__UpperCAmelCase ):
__magic_name__: Tuple = str(__UpperCAmelCase )
__magic_name__: Union[str, Any] = label
# Load the accuracy metric from the datasets package
__magic_name__: Union[str, Any] = evaluate.load("""accuracy""" )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase : Union[str, Any] ):
return metric.compute(predictions=np.argmax(p.predictions , axis=1 ) , references=p.label_ids )
__magic_name__: int = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel=__UpperCAmelCase , finetuning_task="""image-classification""" , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
__magic_name__: Dict = AutoModelForImageClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
__magic_name__: Any = AutoImageProcessor.from_pretrained(
model_args.image_processor_name or model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Define torchvision transforms to be applied to each image.
if "shortest_edge" in image_processor.size:
__magic_name__: str = image_processor.size["""shortest_edge"""]
else:
__magic_name__: Tuple = (image_processor.size["""height"""], image_processor.size["""width"""])
__magic_name__: List[str] = Normalize(mean=image_processor.image_mean , std=image_processor.image_std )
__magic_name__: Tuple = Compose(
[
RandomResizedCrop(__UpperCAmelCase ),
RandomHorizontalFlip(),
ToTensor(),
normalize,
] )
__magic_name__: str = Compose(
[
Resize(__UpperCAmelCase ),
CenterCrop(__UpperCAmelCase ),
ToTensor(),
normalize,
] )
def train_transforms(__UpperCAmelCase : Dict ):
__magic_name__: Optional[Any] = [
_train_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]
]
return example_batch
def val_transforms(__UpperCAmelCase : Optional[Any] ):
__magic_name__: Optional[Any] = [_val_transforms(pil_img.convert("""RGB""" ) ) for pil_img in example_batch["""image"""]]
return example_batch
if training_args.do_train:
if "train" not in dataset:
raise ValueError("""--do_train requires a train dataset""" )
if data_args.max_train_samples is not None:
__magic_name__: List[Any] = (
dataset["""train"""].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
dataset["train"].set_transform(__UpperCAmelCase )
if training_args.do_eval:
if "validation" not in dataset:
raise ValueError("""--do_eval requires a validation dataset""" )
if data_args.max_eval_samples is not None:
__magic_name__: Any = (
dataset["""validation"""].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
dataset["validation"].set_transform(__UpperCAmelCase )
# Initialize our trainer
__magic_name__: Optional[int] = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=dataset["""train"""] if training_args.do_train else None , eval_dataset=dataset["""validation"""] if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__magic_name__: str = None
if training_args.resume_from_checkpoint is not None:
__magic_name__: List[str] = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__magic_name__: List[str] = last_checkpoint
__magic_name__: Union[str, Any] = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
trainer.save_model()
trainer.log_metrics("""train""" , train_result.metrics )
trainer.save_metrics("""train""" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
__magic_name__: Tuple = trainer.evaluate()
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
# Write model card and (optionally) push to hub
__magic_name__: Optional[int] = {
"""finetuned_from""": model_args.model_name_or_path,
"""tasks""": """image-classification""",
"""dataset""": data_args.dataset_name,
"""tags""": ["""image-classification""", """vision"""],
}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
if __name__ == "__main__":
main()
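# --- Editor's note: a tiny, self-contained check of the collate function defined above
# (upstream calls it `collate_fn`): it stacks per-example pixel tensors into a batch and
# gathers labels into one tensor. Shapes here are made up for illustration.
import torch

examples = [{"pixel_values": torch.rand(3, 224, 224), "labels": 1} for _ in range(4)]
batch = {
    "pixel_values": torch.stack([e["pixel_values"] for e in examples]),
    "labels": torch.tensor([e["labels"] for e in examples]),
}
assert batch["pixel_values"].shape == (4, 3, 224, 224) and batch["labels"].shape == (4,)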
| 213 | 0 |
"""simple docstring"""
from typing import Union
import fire
import torch
from tqdm import tqdm
def lowerCamelCase_( _lowerCamelCase , _lowerCamelCase = "cpu" , _lowerCamelCase = None ) -> None:
'''simple docstring'''
_lowerCamelCase : Any = torch.load(_lowerCamelCase , map_location=_lowerCamelCase )
for k, v in tqdm(state_dict.items() ):
if not isinstance(_lowerCamelCase , torch.Tensor ):
raise TypeError("FP16 conversion only works on paths that are saved state dicts, like pytorch_model.bin" )
_lowerCamelCase : List[str] = v.half()
if save_path is None: # overwrite src_path
_lowerCamelCase : Union[str, Any] = src_path
torch.save(_lowerCamelCase , _lowerCamelCase )
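# --- Editor's note: a hedged sketch of what the converter above does (fire exposes it as
# `convert`): load a saved state dict, halve each tensor, save it back. File names are
# illustrative only.
import torch as _torch_demo

_torch_demo.save({"w": _torch_demo.rand(2, 2)}, "demo_fp32.bin")
_sd = {k: v.half() for k, v in _torch_demo.load("demo_fp32.bin", map_location="cpu").items()}
_torch_demo.save(_sd, "demo_fp16.bin")
assert _torch_demo.load("demo_fp16.bin")["w"].dtype == _torch_demo.float16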
if __name__ == "__main__":
fire.Fire(convert)
| 46 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__UpperCamelCase : Tuple = logging.get_logger(__name__)
__UpperCamelCase : int = {
"""unc-nlp/lxmert-base-uncased""": """https://huggingface.co/unc-nlp/lxmert-base-uncased/resolve/main/config.json""",
}
class __UpperCamelCase ( _lowerCAmelCase ):
__snake_case :Union[str, Any] = 'lxmert'
__snake_case :Union[str, Any] = {}
def __init__( self : List[str] , _lowerCAmelCase : Dict=3_0522 , _lowerCAmelCase : List[str]=768 , _lowerCAmelCase : Union[str, Any]=12 , _lowerCAmelCase : Union[str, Any]=9500 , _lowerCAmelCase : Union[str, Any]=1600 , _lowerCAmelCase : Optional[Any]=400 , _lowerCAmelCase : Tuple=3072 , _lowerCAmelCase : List[Any]="gelu" , _lowerCAmelCase : int=0.1 , _lowerCAmelCase : Dict=0.1 , _lowerCAmelCase : Tuple=512 , _lowerCAmelCase : Tuple=2 , _lowerCAmelCase : Optional[Any]=0.02 , _lowerCAmelCase : List[str]=1e-12 , _lowerCAmelCase : Any=9 , _lowerCAmelCase : Optional[Any]=5 , _lowerCAmelCase : Any=5 , _lowerCAmelCase : Dict=2048 , _lowerCAmelCase : int=4 , _lowerCAmelCase : Optional[Any]=6.67 , _lowerCAmelCase : Optional[Any]=True , _lowerCAmelCase : int=True , _lowerCAmelCase : int=True , _lowerCAmelCase : str=True , _lowerCAmelCase : Dict=True , _lowerCAmelCase : int=True , _lowerCAmelCase : int=True , **_lowerCAmelCase : Tuple , ) -> Dict:
"""simple docstring"""
__lowercase = vocab_size
__lowercase = hidden_size
__lowercase = num_attention_heads
__lowercase = hidden_act
__lowercase = intermediate_size
__lowercase = hidden_dropout_prob
__lowercase = attention_probs_dropout_prob
__lowercase = max_position_embeddings
__lowercase = type_vocab_size
__lowercase = initializer_range
__lowercase = layer_norm_eps
__lowercase = num_qa_labels
__lowercase = num_object_labels
__lowercase = num_attr_labels
__lowercase = l_layers
__lowercase = x_layers
__lowercase = r_layers
__lowercase = visual_feat_dim
__lowercase = visual_pos_dim
__lowercase = visual_loss_normalizer
__lowercase = task_matched
__lowercase = task_mask_lm
__lowercase = task_obj_predict
__lowercase = task_qa
__lowercase = visual_obj_loss
__lowercase = visual_attr_loss
__lowercase = visual_feat_loss
__lowercase = {"""vision""": r_layers, """cross_encoder""": x_layers, """language""": l_layers}
super().__init__(**_lowerCAmelCase )
| 80 | 0 |
import pickle
import shutil
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, XGLMTokenizer, XGLMTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE : Any = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class snake_case ( __a, unittest.TestCase ):
"""simple docstring"""
_a = XGLMTokenizer
_a = XGLMTokenizerFast
_a = True
_a = True
def a__ ( self ) -> Optional[int]:
super().setUp()
# We have a SentencePiece fixture for testing
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(lowerCAmelCase_, keep_accents=lowerCAmelCase_ )
tokenizer.save_pretrained(self.tmpdirname )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = '<pad>'
SCREAMING_SNAKE_CASE_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase_ ), lowerCAmelCase_ )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase_ ), lowerCAmelCase_ )
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0], '<s>' )
self.assertEqual(vocab_keys[1], '<pad>' )
self.assertEqual(len(lowerCAmelCase_ ), 1008 )
def a__ ( self ) -> Dict:
self.assertEqual(self.get_tokenizer().vocab_size, 1008 )
def a__ ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(lowerCAmelCase_, keep_accents=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('This is a test' )
self.assertListEqual(lowerCAmelCase_, ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase_ ), [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]], )
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
lowerCAmelCase_, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
], )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_, [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
], )
SCREAMING_SNAKE_CASE_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase_ )
self.assertListEqual(
lowerCAmelCase_, [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'<unk>',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'<unk>',
'.',
], )
@cached_property
def a__ ( self ) -> str:
return XGLMTokenizer.from_pretrained('facebook/xglm-564M' )
def a__ ( self ) -> Optional[int]:
with tempfile.NamedTemporaryFile() as f:
shutil.copyfile(lowerCAmelCase_, f.name )
SCREAMING_SNAKE_CASE_ = XGLMTokenizer(f.name, keep_accents=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = pickle.dumps(lowerCAmelCase_ )
pickle.loads(lowerCAmelCase_ )
def a__ ( self ) -> List[Any]:
if not self.test_rust_tokenizer:
return
SCREAMING_SNAKE_CASE_ = self.get_tokenizer()
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = 'I was born in 92000, and this is falsé.'
SCREAMING_SNAKE_CASE_ = tokenizer.tokenize(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.tokenize(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_, add_special_tokens=lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_, add_special_tokens=lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_, lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = self.get_rust_tokenizer()
SCREAMING_SNAKE_CASE_ = tokenizer.encode(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = rust_tokenizer.encode(lowerCAmelCase_ )
self.assertListEqual(lowerCAmelCase_, lowerCAmelCase_ )
@slow
def a__ ( self ) -> int:
SCREAMING_SNAKE_CASE_ = 'Hello World!'
SCREAMING_SNAKE_CASE_ = [2, 31227, 4447, 35]
self.assertListEqual(lowerCAmelCase_, self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def a__ ( self ) -> Any:
SCREAMING_SNAKE_CASE_ = (
'This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will'
' add words that should not exsist and be tokenized to unk, such as saoneuhaoesuth'
)
# fmt: off
SCREAMING_SNAKE_CASE_ = [2, 1018, 67, 11, 1988, 2617, 5631, 278, 11, 3407, 48, 71630, 28085, 4, 3234, 157, 13, 6, 5, 6, 4, 3526, 768, 15, 659, 57, 298, 3983, 864, 129, 21, 6, 5, 13675, 377, 652, 7580, 10341, 155, 2817, 422, 1666, 7, 1674, 53, 113, 202277, 17892, 33, 60, 87, 4, 3234, 157, 61, 2667, 52376, 19, 88, 23, 735]
# fmt: on
self.assertListEqual(lowerCAmelCase_, self.big_tokenizer.encode(lowerCAmelCase_ ) )
@slow
def a__ ( self ) -> Tuple:
SCREAMING_SNAKE_CASE_ = {
'input_ids': [[2, 108825, 1163, 15, 88010, 473, 15898, 157, 13672, 1857, 312, 8, 238021, 1163, 53, 13672, 1857, 312, 8, 53283, 182396, 8, 18566, 16, 36733, 4101, 8, 230, 244017, 122553, 7, 15, 132597, 4, 293, 12511, 7610, 4, 3414, 132597, 9, 4, 32361, 362, 4, 734, 28512, 32569, 18, 4, 32361, 26096, 14982, 73, 18715, 21433, 235261, 15, 492, 12427, 16, 53, 18715, 21433, 65454, 15, 23659, 563, 16, 278, 597, 2843, 595, 7931, 182396, 64186, 22, 886, 595, 132981, 53, 25540, 3449, 43982, 39901, 5951, 878, 330, 4, 27694, 80269, 312, 53, 6517, 11780, 611, 20408, 5], [2, 6, 132597, 67, 42897, 33, 592, 8, 163729, 25540, 361, 136997, 109514, 173230, 7, 501, 60, 102913, 196, 5631, 235, 63243, 473, 6, 231757, 74, 5277, 7905, 53, 3095, 37317, 22, 454, 183874, 5], [2, 268, 31298, 46530, 6, 132935, 43831, 7, 597, 32, 24, 3688, 9865, 5]],
'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]
} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase_, model_name='facebook/xglm-564M', padding=lowerCAmelCase_, )
| 719 |
'''simple docstring'''
from math import ceil
def _UpperCamelCase ( lowerCAmelCase__: int = 1001 ) -> int:
SCREAMING_SNAKE_CASE_ = 1
for i in range(1 ,int(ceil(n / 2.0 ) ) ):
SCREAMING_SNAKE_CASE_ = 2 * i + 1
SCREAMING_SNAKE_CASE_ = 2 * i
SCREAMING_SNAKE_CASE_ = total + 4 * odd**2 - 6 * even
return total
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution())
else:
try:
SCREAMING_SNAKE_CASE : List[str] = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number")
| 238 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
_lowerCamelCase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
_lowerCamelCase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 674 |
from copy import deepcopy
import torch
import torch.nn.functional as F
from torch.optim import AdamW
from torch.optim.lr_scheduler import LambdaLR
from torch.utils.data import DataLoader
from accelerate.accelerator import Accelerator
from accelerate.state import GradientState
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import DistributedType, is_torch_version, set_seed
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : List[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
for param, grad_param in zip(model_a.parameters() , model_b.parameters() ):
if not param.requires_grad:
continue
if not did_step:
# Grads should not be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nmodel_a grad ({param.grad}) == model_b grad ({grad_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , grad_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nmodel_a grad ({param.grad}) != model_b grad ({grad_param.grad})"
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Dict=True ):
"""simple docstring"""
model.train()
__a = model(_SCREAMING_SNAKE_CASE )
__a = F.mse_loss(_SCREAMING_SNAKE_CASE , target.to(output.device ) )
if not do_backward:
loss /= accelerator.gradient_accumulation_steps
loss.backward()
else:
accelerator.backward(_SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any] , _SCREAMING_SNAKE_CASE : Dict=False ):
"""simple docstring"""
set_seed(42 )
__a = RegressionModel()
__a = deepcopy(_SCREAMING_SNAKE_CASE )
__a = RegressionDataset(length=80 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
model.to(accelerator.device )
if sched:
__a = AdamW(params=model.parameters() , lr=1e-3 )
__a = AdamW(params=ddp_model.parameters() , lr=1e-3 )
__a = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
__a = LambdaLR(_SCREAMING_SNAKE_CASE , lr_lambda=lambda _SCREAMING_SNAKE_CASE : epoch**0.65 )
# Make a copy of `model`
if sched:
__a , __a , __a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if sched:
return (model, opt, sched, dataloader, ddp_model, ddp_opt, ddp_sched)
return model, ddp_model, dataloader
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Optional[Any] ):
"""simple docstring"""
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__a , __a = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Since `no_sync` is a noop, `ddp_model` and `model` grads should always be in sync
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
assert torch.allclose(
param.grad , ddp_param.grad ), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : List[Any] ):
"""simple docstring"""
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
# Use a single batch
__a , __a = next(iter(_SCREAMING_SNAKE_CASE ) ).values()
for iteration in range(3 ):
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
if iteration % 2 == 0:
# Accumulate grads locally
with accelerator.no_sync(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
# Sync grads
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if iteration % 2 == 0:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
else:
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any=False , _SCREAMING_SNAKE_CASE : int=False ):
"""simple docstring"""
__a = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Do "gradient accumulation" (noop)
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# DDP model and model should only be in sync when not (iteration % 2 == 0)
for param, ddp_param in zip(model.parameters() , ddp_model.parameters() ):
if not param.requires_grad:
continue
if ((iteration + 1) % 2 == 0) or (iteration == len(_SCREAMING_SNAKE_CASE ) - 1):
# Grads should be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is True
), f"Gradients not in sync when they should be at iteration {iteration}:\nModel grad ({param.grad}) != DDP grad ({ddp_param.grad})"
else:
# Grads should not be in sync
assert (
torch.allclose(param.grad , ddp_param.grad ) is False
), f"Gradients in sync when they should not be at iteration {iteration}:\nModel grad ({param.grad}) == DDP grad ({ddp_param.grad})"
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
__a = ddp_input[torch.randperm(len(_SCREAMING_SNAKE_CASE ) )]
GradientState._reset_state()
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Union[str, Any]=False , _SCREAMING_SNAKE_CASE : Tuple=False ):
"""simple docstring"""
__a = Accelerator(
split_batches=_SCREAMING_SNAKE_CASE , dispatch_batches=_SCREAMING_SNAKE_CASE , gradient_accumulation_steps=2 )
# Test that context manager behaves properly
__a , __a , __a , __a , __a , __a , __a = get_training_setup(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for iteration, batch in enumerate(_SCREAMING_SNAKE_CASE ):
__a , __a = batch.values()
# Gather the distributed inputs and targs for the base model
__a , __a = accelerator.gather((ddp_input, ddp_target) )
__a , __a = input.to(accelerator.device ), target.to(accelerator.device )
# Perform our initial ground truth step in non "DDP"
model.train()
ddp_model.train()
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
opt.step()
if ((iteration + 1) % 2 == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE )):
if split_batches:
sched.step()
else:
for _ in range(accelerator.num_processes ):
sched.step()
opt.zero_grad()
# Perform gradient accumulation under wrapper
with accelerator.accumulate(_SCREAMING_SNAKE_CASE ):
step_model(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
ddp_opt.step()
ddp_sched.step()
ddp_opt.zero_grad()
# Learning rates should be the same
assert (
opt.param_groups[0]["lr"] == ddp_opt.param_groups[0]["lr"]
), f"Learning rates found in each optimizer did not align\nopt: {opt.param_groups[0]['lr']}\nDDP opt: {ddp_opt.param_groups[0]['lr']}\n"
__a = (((iteration + 1) % 2) == 0) or ((iteration + 1) == len(_SCREAMING_SNAKE_CASE ))
if accelerator.num_processes > 1:
check_model_parameters(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Shuffle ddp_input on each iteration
torch.manual_seed(1337 + iteration )
GradientState._reset_state()
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Accelerator()
__a = RegressionDataset(length=80 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
__a = RegressionDataset(length=96 )
__a = DataLoader(_SCREAMING_SNAKE_CASE , batch_size=16 )
__a , __a = accelerator.prepare(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert accelerator.gradient_state.active_dataloader is None
for iteration, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if iteration < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
if iteration == 1:
for batch_num, _ in enumerate(_SCREAMING_SNAKE_CASE ):
assert id(accelerator.gradient_state.active_dataloader ) == id(_SCREAMING_SNAKE_CASE )
if batch_num < len(_SCREAMING_SNAKE_CASE ) - 1:
assert not accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
else:
assert accelerator.gradient_state.end_of_dataloader
assert accelerator.gradient_state.active_dataloader is None
def lowerCAmelCase__ ( ):
"""simple docstring"""
__a = Accelerator()
__a = accelerator.state
if state.local_process_index == 0:
print("""**Test `accumulate` gradient accumulation with dataloader break**""" )
test_dataloader_break()
if state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print("""**Test NOOP `no_sync` context manager**""" )
test_noop_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type in (DistributedType.MULTI_GPU, DistributedType.MULTI_CPU):
if state.local_process_index == 0:
print("""**Test Distributed `no_sync` context manager**""" )
test_distributed_sync(_SCREAMING_SNAKE_CASE )
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Currently will break on torch 2.0+, need to investigate why
if is_torch_version("""<""" , """2.0""" ) or state.distributed_type == DistributedType.NO:
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , """`split_batches=False`, `dispatch_batches=False`**""" , )
test_gradient_accumulation_with_opt_and_scheduler()
if state.distributed_type == DistributedType.MULTI_GPU:
for split_batch in [True, False]:
for dispatch_batches in [True, False]:
if not split_batch and not dispatch_batches:
continue
if state.local_process_index == 0:
print(
"""**Test `accumulate` gradient accumulation with optimizer and scheduler, """ , f"`split_batches={split_batch}` and `dispatch_batches={dispatch_batches}`**" , )
test_gradient_accumulation_with_opt_and_scheduler(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def lowerCAmelCase__ ( _SCREAMING_SNAKE_CASE : Any ):
"""simple docstring"""
main()
if __name__ == "__main__":
main()
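# --- Editor's note: a hedged, minimal sketch of the `accumulate` pattern the tests above
# verify: gradients only sync (and the optimizer step only takes effect) every
# `gradient_accumulation_steps` batches. Model, data and optimizer are stand-ins.
import torch
from torch.utils.data import DataLoader, TensorDataset

from accelerate import Accelerator

accelerator = Accelerator(gradient_accumulation_steps=2)
model = torch.nn.Linear(4, 1)
opt = torch.optim.SGD(model.parameters(), lr=0.1)
data = DataLoader(TensorDataset(torch.rand(8, 4), torch.rand(8, 1)), batch_size=2)
model, opt, data = accelerator.prepare(model, opt, data)

for x, y in data:
    with accelerator.accumulate(model):  # skips gradient sync on off-steps
        loss = torch.nn.functional.mse_loss(model(x), y)
        accelerator.backward(loss)
        opt.step()
        opt.zero_grad()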
| 225 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class snake_case :
'''simple docstring'''
A_ : Tuple = PegasusConfig
A_ : Optional[Any] = {}
A_ : Any = "gelu"
def __init__( self : Optional[int], _lowerCamelCase : Union[str, Any], _lowerCamelCase : str=13, _lowerCamelCase : Optional[Any]=7, _lowerCamelCase : Union[str, Any]=True, _lowerCamelCase : int=False, _lowerCamelCase : str=99, _lowerCamelCase : Union[str, Any]=32, _lowerCamelCase : str=2, _lowerCamelCase : List[Any]=4, _lowerCamelCase : Optional[Any]=37, _lowerCamelCase : Union[str, Any]=0.1, _lowerCamelCase : Optional[int]=0.1, _lowerCamelCase : Optional[Any]=40, _lowerCamelCase : List[str]=2, _lowerCamelCase : Dict=1, _lowerCamelCase : Any=0, ):
'''simple docstring'''
__A = parent
__A = batch_size
__A = seq_length
__A = is_training
__A = use_labels
__A = vocab_size
__A = hidden_size
__A = num_hidden_layers
__A = num_attention_heads
__A = intermediate_size
__A = hidden_dropout_prob
__A = attention_probs_dropout_prob
__A = max_position_embeddings
__A = eos_token_id
__A = pad_token_id
__A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
__A = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size )
__A = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ), 1 )
__A = tf.concat([input_ids, eos_tensor], axis=1 )
__A = ids_tensor([self.batch_size, self.seq_length], self.vocab_size )
__A = self.config_cls(
vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates, )
__A = prepare_pegasus_inputs_dict(_lowerCamelCase, _lowerCamelCase, _lowerCamelCase )
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : Union[str, Any], _lowerCamelCase : Tuple ):
'''simple docstring'''
__A = TFPegasusModel(config=_lowerCamelCase ).get_decoder()
__A = inputs_dict['''input_ids''']
__A = input_ids[:1, :]
__A = inputs_dict['''attention_mask'''][:1, :]
__A = inputs_dict['''head_mask''']
__A = 1
# first forward pass
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, head_mask=_lowerCamelCase, use_cache=_lowerCamelCase )
__A , __A = outputs.to_tuple()
# create hypothetical next token and extend it to next_input_ids
__A = ids_tensor((self.batch_size, 3), config.vocab_size )
__A = tf.cast(ids_tensor((self.batch_size, 3), 2 ), tf.inta )
# append to next input_ids and attention_mask
__A = tf.concat([input_ids, next_tokens], axis=-1 )
__A = tf.concat([attention_mask, next_attn_mask], axis=-1 )
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase )[0]
__A = model(_lowerCamelCase, attention_mask=_lowerCamelCase, past_key_values=_lowerCamelCase )[0]
self.parent.assertEqual(next_tokens.shape[1], output_from_past.shape[1] )
# select random slice
__A = int(ids_tensor((1,), output_from_past.shape[-1] ) )
__A = output_from_no_past[:, -3:, random_slice_idx]
__A = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(_lowerCamelCase, _lowerCamelCase, rtol=1e-3 )
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , __UpperCamelCase=None , ):
"""simple docstring"""
if attention_mask is None:
__A = tf.cast(tf.math.not_equal(__UpperCamelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__A = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__A = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__A = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class snake_case ( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
A_ : str = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
A_ : Optional[Any] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
A_ : Optional[int] = (
{
"conversational": TFPegasusForConditionalGeneration,
"feature-extraction": TFPegasusModel,
"summarization": TFPegasusForConditionalGeneration,
"text2text-generation": TFPegasusForConditionalGeneration,
"translation": TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ : Tuple = True
A_ : Union[str, Any] = False
A_ : str = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ):
'''simple docstring'''
__A = TFPegasusModelTester(self )
__A = ConfigTester(self, config_class=_lowerCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : str ):
'''simple docstring'''
__A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_lowerCamelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class snake_case ( unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
A_ : str = [
"California's largest electricity provider has cut power to hundreds of thousands of customers in an effort to"
" reduce the risk of wildfires.",
"N-Dubz have revealed they\'re \"grateful\" to have been nominated for four Mobo Awards.",
] # differs slightly from pytorch, likely due to numerical differences in linear layers
A_ : Union[str, Any] = "google/pegasus-xsum"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : List[Any] ):
'''simple docstring'''
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Tuple ):
'''simple docstring'''
__A = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def _SCREAMING_SNAKE_CASE ( self : str, **_lowerCamelCase : str ):
'''simple docstring'''
__A = self.translate_src_text(**_lowerCamelCase )
assert self.expected_text == generated_words
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any], **_lowerCamelCase : Tuple ):
'''simple docstring'''
__A = self.tokenizer(self.src_text, **_lowerCamelCase, padding=_lowerCamelCase, return_tensors='''tf''' )
__A = self.model.generate(
model_inputs.input_ids, attention_mask=model_inputs.attention_mask, num_beams=2, use_cache=_lowerCamelCase, )
__A = self.tokenizer.batch_decode(generated_ids.numpy(), skip_special_tokens=_lowerCamelCase )
return generated_words
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ):
'''simple docstring'''
self._assert_generated_batch_equal_expected()
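# --- Editor's note: a hedged sketch of the batch-generation path the integration test
# above drives, using the same public checkpoint (the obfuscated class above corresponds
# to TFAutoModelForSeq2SeqLM upstream). The article text is a placeholder.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

tok = AutoTokenizer.from_pretrained("google/pegasus-xsum")
model = TFAutoModelForSeq2SeqLM.from_pretrained("google/pegasus-xsum")
batch = tok(["Some long article text to summarize."], return_tensors="tf", padding=True)
ids = model.generate(batch.input_ids, attention_mask=batch.attention_mask, num_beams=2)
print(tok.batch_decode(ids, skip_special_tokens=True))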
| 215 |
"""simple docstring"""
from __future__ import annotations
from math import pi
from typing import Protocol
import matplotlib.pyplot as plt
import numpy as np
class snake_case ( _lowerCAmelCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple, _lowerCamelCase : float ):
'''simple docstring'''
return 0.0
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = min([-2_0, np.min(fft_results[1 : samplerate // 2 - 1] )] )
__A = max([2_0, np.max(fft_results[1 : samplerate // 2 - 1] )] )
return lowest, highest
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = 5_1_2
__A = [1] + [0] * (size - 1)
__A = [filter_type.process(__UpperCamelCase ) for item in inputs]
__A = [0] * (samplerate - size) # zero-padding
outputs += filler
__A = np.abs(np.fft.fft(__UpperCamelCase ) )
__A = 2_0 * np.logaa(__UpperCamelCase )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
# Display within reasonable bounds
__A = get_bounds(__UpperCamelCase , __UpperCamelCase )
plt.ylim(max([-8_0, bounds[0]] ) , min([8_0, bounds[1]] ) )
plt.ylabel('''Gain (dB)''' )
plt.plot(__UpperCamelCase )
plt.show()
def lowerCAmelCase ( __UpperCamelCase , __UpperCamelCase ):
"""simple docstring"""
__A = 5_1_2
__A = [1] + [0] * (size - 1)
__A = [filter_type.process(__UpperCamelCase ) for item in inputs]
__A = [0] * (samplerate - size) # zero-padding
outputs += filler
__A = np.angle(np.fft.fft(__UpperCamelCase ) )
# Frequencies on log scale from 24 to nyquist frequency
plt.xlim(2_4 , samplerate / 2 - 1 )
plt.xlabel('''Frequency (Hz)''' )
plt.xscale('''log''' )
plt.ylim(-2 * pi , 2 * pi )
plt.ylabel('''Phase shift (Radians)''' )
plt.plot(np.unwrap(__UpperCamelCase , -2 * pi ) )
plt.show()
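# --- Editor's note: a hedged sketch of a filter that satisfies the `process` protocol
# above (upstream calls it FilterType): a simple one-pole low-pass. Plugged into the
# impulse-response/FFT helpers defined in this file, it would show gain rolling off
# toward the Nyquist frequency.
class OnePoleLowPass:
    def __init__(self, alpha: float = 0.1) -> None:
        self.alpha = alpha
        self.state = 0.0

    def process(self, sample: float) -> float:
        # y[n] = y[n-1] + alpha * (x[n] - y[n-1])
        self.state += self.alpha * (sample - self.state)
        return self.state

# e.g. show_frequency_response(OnePoleLowPass(), 48000)  # names per the upstream module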
| 215 | 1 |
import copy
import importlib.metadata
import json
import os
from dataclasses import dataclass
from typing import Any, Dict, Union
from packaging import version
from ..utils import is_torch_available, logging
if is_torch_available():
import torch
UpperCamelCase = logging.get_logger(__name__)
@dataclass
class lowerCAmelCase_ :
def __init__( self , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=6.0 , _lowerCAmelCase=None , _lowerCAmelCase=False , _lowerCAmelCase=False , _lowerCAmelCase=None , _lowerCAmelCase="fp4" , _lowerCAmelCase=False , **_lowerCAmelCase , ):
_lowercase : str = load_in_abit
_lowercase : List[Any] = load_in_abit
_lowercase : List[Any] = llm_inta_threshold
_lowercase : int = llm_inta_skip_modules
_lowercase : Optional[Any] = llm_inta_enable_fpaa_cpu_offload
_lowercase : List[str] = llm_inta_has_fpaa_weight
_lowercase : Tuple = bnb_abit_quant_type
_lowercase : Dict = bnb_abit_use_double_quant
if bnb_abit_compute_dtype is None:
_lowercase : Any = torch.floataa
elif isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
_lowercase : List[Any] = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
elif isinstance(__SCREAMING_SNAKE_CASE , torch.dtype ):
_lowercase : Optional[int] = bnb_abit_compute_dtype
else:
raise ValueError('bnb_4bit_compute_dtype must be a string or a torch.dtype' )
self.post_init()
def __a ( self ):
if not isinstance(self.llm_inta_threshold , __SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_threshold must be a float' )
if self.llm_inta_skip_modules is not None and not isinstance(self.llm_inta_skip_modules , __SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_skip_modules must be a list of strings' )
if not isinstance(self.llm_inta_enable_fpaa_cpu_offload , __SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_enable_fp32_cpu_offload must be a boolean' )
if not isinstance(self.llm_inta_has_fpaa_weight , __SCREAMING_SNAKE_CASE ):
raise ValueError('llm_int8_has_fp16_weight must be a boolean' )
if self.bnb_abit_compute_dtype is not None and not isinstance(self.bnb_abit_compute_dtype , torch.dtype ):
raise ValueError('bnb_4bit_compute_dtype must be torch.dtype' )
if not isinstance(self.bnb_abit_quant_type , __SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_quant_type must be a string' )
if not isinstance(self.bnb_abit_use_double_quant , __SCREAMING_SNAKE_CASE ):
raise ValueError('bnb_4bit_use_double_quant must be a boolean' )
if self.load_in_abit and not version.parse(importlib.metadata.version('bitsandbytes' ) ) >= version.parse(
'0.39.0' ):
raise ValueError(
'4 bit quantization requires bitsandbytes>=0.39.0 - please upgrade your bitsandbytes version' )
def __a ( self ):
return self.load_in_abit or self.load_in_abit
def __a ( self ):
if self.load_in_abit:
return "llm_int8"
elif self.load_in_abit and self.bnb_abit_quant_type == "fp4":
return "fp4"
elif self.load_in_abit and self.bnb_abit_quant_type == "nf4":
return "nf4"
else:
return None
@classmethod
def __a ( cls , _lowerCAmelCase , _lowerCAmelCase , **_lowerCAmelCase ):
_lowercase : List[Any] = cls(**__SCREAMING_SNAKE_CASE )
_lowercase : Optional[Any] = []
for key, value in kwargs.items():
if hasattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
setattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
to_remove.append(__SCREAMING_SNAKE_CASE )
for key in to_remove:
kwargs.pop(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if return_unused_kwargs:
return config, kwargs
else:
return config
def __a ( self , _lowerCAmelCase ):
with open(__SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
_lowercase : str = self.to_dict()
_lowercase : Any = json.dumps(__SCREAMING_SNAKE_CASE , indent=2 , sort_keys=__SCREAMING_SNAKE_CASE ) + '\n'
writer.write(__SCREAMING_SNAKE_CASE )
def __a ( self ):
_lowercase : Optional[int] = copy.deepcopy(self.__dict__ )
_lowercase : List[str] = str(output['bnb_4bit_compute_dtype'] ).split('.' )[1]
return output
def __repr__( self ):
return F"""{self.__class__.__name__} {self.to_json_string()}"""
    def __a ( self , use_diff = True ):
        if use_diff is True:
            _lowercase : Dict = self.to_diff_dict()
        else:
            _lowercase : int = self.to_dict()
        return json.dumps(config_dict , indent=2 , sort_keys=True ) + "\n"
def __a ( self ):
_lowercase : List[Any] = self.to_dict()
# get the default config dict
_lowercase : int = BitsAndBytesConfig().to_dict()
_lowercase : Any = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if value != default_config_dict[key]:
_lowercase : Union[str, Any] = value
return serializable_config_dict
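# Added sketch (illustrative assumption, not part of the original row): with the
# 8-bit/4-bit flags disambiguated above, method resolution behaves like
#   BitsAndBytesConfig(load_in_8bit=True)                            -> "llm_int8"
#   BitsAndBytesConfig(load_in_4bit=True, bnb_4bit_quant_type="nf4") -> "nf4"
# and to_diff_dict() serializes only the fields that differ from these defaults.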
| 66 |
"""simple docstring"""
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a ( __snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE : Optional[int] = DebertaTokenizer
SCREAMING_SNAKE_CASE : Optional[int] = True
SCREAMING_SNAKE_CASE : Any = DebertaTokenizerFast
def UpperCamelCase ( self : Optional[Any] ) -> Dict:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCamelCase_ = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'\u0120',
'\u0120l',
'\u0120n',
'\u0120lo',
'\u0120low',
'er',
'\u0120lowest',
'\u0120newer',
'\u0120wider',
'[UNK]',
]
lowerCamelCase_ = dict(zip(__SCREAMING_SNAKE_CASE , range(len(__SCREAMING_SNAKE_CASE ) ) ) )
lowerCamelCase_ = ['#version: 0.2', '\u0120 l', '\u0120l o', '\u0120lo w', 'e r', '']
lowerCamelCase_ = {'unk_token': '[UNK]'}
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCamelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(__SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(__SCREAMING_SNAKE_CASE ) )
def UpperCamelCase ( self : Tuple , **__SCREAMING_SNAKE_CASE : Dict ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : str , __SCREAMING_SNAKE_CASE : Any ) -> Dict:
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = 'lower newer'
return input_text, output_text
def UpperCamelCase ( self : Optional[Any] ) -> Optional[Any]:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = 'lower newer'
lowerCamelCase_ = ['l', 'o', 'w', 'er', '\u0120', 'n', 'e', 'w', 'er']
lowerCamelCase_ = tokenizer.tokenize(__SCREAMING_SNAKE_CASE )
self.assertListEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokens + [tokenizer.unk_token]
lowerCamelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ = self.get_tokenizer()
lowerCamelCase_ = tokenizer('Hello' , 'World' )
lowerCamelCase_ = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['token_type_ids'] , __SCREAMING_SNAKE_CASE )
@slow
def UpperCamelCase ( self : Union[str, Any] ) -> Tuple:
lowerCamelCase_ = self.tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCamelCase_ = tokenizer.encode('sequence builders' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode('multi-sequence build' , add_special_tokens=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(
'sequence builders' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.encode(
'sequence builders' , 'multi-sequence build' , add_special_tokens=__SCREAMING_SNAKE_CASE , add_prefix_space=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = tokenizer.build_inputs_with_special_tokens(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def UpperCamelCase ( self : Optional[int] ) -> Optional[Any]:
lowerCamelCase_ = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
lowerCamelCase_ = tokenizer_class.from_pretrained('microsoft/deberta-base' )
lowerCamelCase_ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
lowerCamelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , padding=__SCREAMING_SNAKE_CASE )
lowerCamelCase_ = [tokenizer.decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE ) for seq in encoding['input_ids']]
# fmt: off
lowerCamelCase_ = {
'input_ids': [
[1, 2118, 11126, 565, 35, 83, 25191, 163, 18854, 13, 12156, 12, 16101, 25376, 13807, 9, 22205, 27893, 1635, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2118, 11126, 565, 24536, 80, 43797, 4878, 7373, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 133, 78, 65, 16, 10, 3724, 1538, 33183, 11303, 43797, 1938, 4, 870, 24165, 29105, 5, 739, 32644, 33183, 11303, 36173, 88, 80, 650, 7821, 45940, 6, 52, 2559, 5, 1836, 9, 5, 7397, 13171, 31, 5, 1836, 9, 32644, 33183, 11303, 4, 2]
],
'token_type_ids': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'attention_mask': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
lowerCamelCase_ = [
'ALBERT: A Lite BERT for Self-supervised Learning of Language Representations',
'ALBERT incorporates two parameter reduction techniques',
'The first one is a factorized embedding parameterization. By decomposing the large vocabulary'
' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'
' vocabulary embedding.',
]
self.assertDictEqual(encoding.data , __SCREAMING_SNAKE_CASE )
for expected, decoded in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
self.assertEqual(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
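# Added note (illustrative, not part of the original row): DeBERTa assigns
# token_type_id 0 to the first sequence and 1 to the second, which is what the
# [0] * 7 + [1] * 6 expectation for tokenizer('Hello' , 'World') encodes above.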
| 549 | 0 |
from string import ascii_lowercase, ascii_uppercase
def lowerCAmelCase ( sentence : str ) -> str:
"""simple docstring"""
if not sentence:
return ""
    __SCREAMING_SNAKE_CASE: Optional[int] = dict(zip(ascii_lowercase , ascii_uppercase ) )
return lower_to_upper.get(sentence[0] , sentence[0] ) + sentence[1:]
if __name__ == "__main__":
from doctest import testmod
testmod()
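# Added usage sketch (illustration only): with the zip(ascii_lowercase ,
# ascii_uppercase ) mapping above, only the first character is upper-cased:
# >>> lowerCAmelCase('hello world')
# 'Hello world'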
| 146 |
from __future__ import annotations
from math import pow, sqrt
def lowerCAmelCase ( resistance : float , reactance : float , impedance : float ) -> dict[str, float]:
    """simple docstring"""
    if (resistance, reactance, impedance).count(0 ) != 1:
        raise ValueError('''One and only one argument must be 0''' )
    if resistance == 0:
        return {"resistance": sqrt(pow(impedance , 2 ) - pow(reactance , 2 ) )}
    elif reactance == 0:
        return {"reactance": sqrt(pow(impedance , 2 ) - pow(resistance , 2 ) )}
    elif impedance == 0:
        return {"impedance": sqrt(pow(resistance , 2 ) + pow(reactance , 2 ) )}
    else:
        raise ValueError('''Exactly one argument must be 0''' )
if __name__ == "__main__":
import doctest
doctest.testmod()
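# Added usage sketch (illustration only): the function solves the series-circuit
# relation Z**2 = R**2 + X**2 for whichever argument is 0, e.g.
# >>> lowerCAmelCase(3, 4, 0)
# {'impedance': 5.0}
# >>> lowerCAmelCase(0, 4, 5)
# {'resistance': 3.0}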
| 146 | 1 |
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
    def __init__( self : Optional[Any], parent : List[str], batch_size : Optional[int]=7, num_channels : int=3, image_size : Union[str, Any]=1_8, min_resolution : Dict=3_0, max_resolution : Any=4_0_0, do_resize : str=True, size : Tuple=None, do_center_crop : List[Any]=True, crop_size : Union[str, Any]=None, do_normalize : Optional[Any]=True, image_mean : Any=[0.5, 0.5, 0.5], image_std : Optional[int]=[0.5, 0.5, 0.5], do_reduce_labels : List[Any]=False, ) -> str:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = size if size is not None else {"height": 2_0, "width": 2_0}
SCREAMING_SNAKE_CASE__ : int = crop_size if crop_size is not None else {"height": 1_8, "width": 1_8}
SCREAMING_SNAKE_CASE__ : List[str] = parent
SCREAMING_SNAKE_CASE__ : Tuple = batch_size
SCREAMING_SNAKE_CASE__ : Tuple = num_channels
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_size
SCREAMING_SNAKE_CASE__ : Optional[int] = min_resolution
SCREAMING_SNAKE_CASE__ : Optional[Any] = max_resolution
SCREAMING_SNAKE_CASE__ : Any = do_resize
SCREAMING_SNAKE_CASE__ : Tuple = size
SCREAMING_SNAKE_CASE__ : Union[str, Any] = do_center_crop
SCREAMING_SNAKE_CASE__ : Tuple = crop_size
SCREAMING_SNAKE_CASE__ : Optional[int] = do_normalize
SCREAMING_SNAKE_CASE__ : Dict = image_mean
SCREAMING_SNAKE_CASE__ : Optional[int] = image_std
SCREAMING_SNAKE_CASE__ : Any = do_reduce_labels
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_reduce_labels": self.do_reduce_labels,
}
def _a ( ) -> Any:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Dict = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE__ : str = Image.open(dataset[0]["file"] )
SCREAMING_SNAKE_CASE__ : int = Image.open(dataset[1]["file"] )
return image, map
def _a ( ) -> str:
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : Tuple = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
SCREAMING_SNAKE_CASE__ : int = Image.open(ds[0]["file"] )
SCREAMING_SNAKE_CASE__ : Any = Image.open(ds[1]["file"] )
SCREAMING_SNAKE_CASE__ : List[Any] = Image.open(ds[2]["file"] )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = Image.open(ds[3]["file"] )
return [imagea, imagea], [mapa, mapa]
@require_torch
@require_vision
class lowerCamelCase (__lowerCamelCase , unittest.TestCase ):
"""simple docstring"""
UpperCAmelCase_ = BeitImageProcessor if is_vision_available() else None
def A_ ( self : Optional[Any] ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = BeitImageProcessingTester(self )
@property
def A_ ( self : Any ) -> Optional[int]:
"""simple docstring"""
return self.image_processor_tester.prepare_image_processor_dict()
def A_ ( self : Any ) -> Union[str, Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_UpperCAmelCase, "do_resize" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "size" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "do_center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "center_crop" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "do_normalize" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "image_mean" ) )
self.assertTrue(hasattr(_UpperCAmelCase, "image_std" ) )
def A_ ( self : str ) -> Tuple:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[int] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {"height": 2_0, "width": 2_0} )
self.assertEqual(image_processor.crop_size, {"height": 1_8, "width": 1_8} )
self.assertEqual(image_processor.do_reduce_labels, _UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : int = self.image_processing_class.from_dict(
self.image_processor_dict, size=4_2, crop_size=8_4, reduce_labels=_UpperCAmelCase )
self.assertEqual(image_processor.size, {"height": 4_2, "width": 4_2} )
self.assertEqual(image_processor.crop_size, {"height": 8_4, "width": 8_4} )
self.assertEqual(image_processor.do_reduce_labels, _UpperCAmelCase )
def A_ ( self : int ) -> Any:
"""simple docstring"""
pass
def A_ ( self : Dict ) -> Tuple:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
SCREAMING_SNAKE_CASE__ : List[Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, Image.Image )
# Test not batched input
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
SCREAMING_SNAKE_CASE__ : Optional[int] = image_processing(_UpperCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def A_ ( self : int ) -> Optional[int]:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : str = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
SCREAMING_SNAKE_CASE__ : Union[str, Any] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, numpify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, np.ndarray )
# Test not batched input
SCREAMING_SNAKE_CASE__ : str = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
SCREAMING_SNAKE_CASE__ : List[str] = image_processing(_UpperCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def A_ ( self : List[Any] ) -> Dict:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, torchify=_UpperCAmelCase )
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, torch.Tensor )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Dict = image_processing(image_inputs[0], return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
# Test batched
SCREAMING_SNAKE_CASE__ : List[Any] = image_processing(_UpperCAmelCase, return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
def A_ ( self : Tuple ) -> str:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
SCREAMING_SNAKE_CASE__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_UpperCAmelCase, torchify=_UpperCAmelCase )
SCREAMING_SNAKE_CASE__ : str = []
for image in image_inputs:
self.assertIsInstance(_UpperCAmelCase, torch.Tensor )
maps.append(torch.zeros(image.shape[-2:] ).long() )
# Test not batched input
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(image_inputs[0], maps[0], return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test batched
SCREAMING_SNAKE_CASE__ : int = image_processing(_UpperCAmelCase, _UpperCAmelCase, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test not batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : Union[str, Any] = image_processing(_UpperCAmelCase, _UpperCAmelCase, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
1,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
# Test batched input (PIL images)
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_semantic_batch_inputs()
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(_UpperCAmelCase, _UpperCAmelCase, return_tensors="pt" )
self.assertEqual(
encoding["pixel_values"].shape, (
2,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(
encoding["labels"].shape, (
2,
self.image_processor_tester.crop_size["height"],
self.image_processor_tester.crop_size["width"],
), )
self.assertEqual(encoding["labels"].dtype, torch.long )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
def A_ ( self : Dict ) -> List[str]:
"""simple docstring"""
# Initialize image_processing
SCREAMING_SNAKE_CASE__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# ADE20k has 150 classes, and the background is included, so labels should be between 0 and 150
        SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = prepare_semantic_single_inputs()
SCREAMING_SNAKE_CASE__ : Any = image_processing(_UpperCAmelCase, _UpperCAmelCase, return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 1_5_0 )
SCREAMING_SNAKE_CASE__ : List[Any] = True
SCREAMING_SNAKE_CASE__ : Tuple = image_processing(_UpperCAmelCase, _UpperCAmelCase, return_tensors="pt" )
self.assertTrue(encoding["labels"].min().item() >= 0 )
self.assertTrue(encoding["labels"].max().item() <= 2_5_5 )
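# Added note (illustrative, not part of the original row): with
# do_reduce_labels=True, BeitImageProcessor remaps the background class 0 to 255
# and shifts the remaining labels down by one, which is why the max-label bound
# checked in the test above moves from 150 to 255.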
| 663 |
import unittest
from datasets import load_dataset
from transformers.pipelines import pipeline
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_torch, slow
@is_pipeline_test
@require_torch
class lowerCamelCase (unittest.TestCase ):
"""simple docstring"""
@require_torch
def A_ ( self : Any ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = pipeline(
task="zero-shot-audio-classification", model="hf-internal-testing/tiny-clap-htsat-unfused" )
SCREAMING_SNAKE_CASE__ : int = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : int = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [{"score": 0.501, "label": "Sound of a dog"}, {"score": 0.499, "label": "Sound of vaccum cleaner"}], )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> Dict:
"""simple docstring"""
pass
@slow
@require_torch
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Optional[Any] = pipeline(
task="zero-shot-audio-classification", model="laion/clap-htsat-unfused", )
# This is an audio of a dog
SCREAMING_SNAKE_CASE__ : List[str] = load_dataset("ashraq/esc50" )
SCREAMING_SNAKE_CASE__ : Optional[Any] = dataset["train"]["audio"][-1]["array"]
SCREAMING_SNAKE_CASE__ : Optional[Any] = audio_classifier(_UpperCAmelCase, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
], )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier([audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"] )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
SCREAMING_SNAKE_CASE__ : Any = audio_classifier(
[audio] * 5, candidate_labels=["Sound of a dog", "Sound of vaccum cleaner"], batch_size=5 )
self.assertEqual(
nested_simplify(_UpperCAmelCase ), [
[
{"score": 0.999, "label": "Sound of a dog"},
{"score": 0.001, "label": "Sound of vaccum cleaner"},
],
]
* 5, )
@unittest.skip("No models are available in TF" )
def A_ ( self : str ) -> List[str]:
"""simple docstring"""
pass
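# Added usage sketch (illustration only; the "vaccum" typo is kept verbatim from
# the candidate labels above):
# classifier = pipeline(task='zero-shot-audio-classification', model='laion/clap-htsat-unfused')
# classifier(audio_array, candidate_labels=['Sound of a dog', 'Sound of vaccum cleaner'])
# returns one {'score', 'label'} dict per candidate, sorted by descending score.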
| 663 | 1 |
import argparse
import re
from flax.traverse_util import flatten_dict, unflatten_dict
from t5x import checkpoints
from transformers import SwitchTransformersConfig, SwitchTransformersForConditionalGeneration
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
from transformers.utils import logging
logging.set_verbosity_info()
# should not include what is already done by the `from_pt` argument
_lowerCAmelCase : Union[str, Any] = {
"/attention/": "/0/SelfAttention/",
"/self_attention/": "/0/SelfAttention/",
"/encoder_decoder_attention/": "/1/EncDecAttention/",
"value": "v",
"query": "q",
"key": "k",
"out": "o",
"pre_self_attention_layer_norm": "0/layer_norm",
"pre_cross_attention_layer_norm": "1/layer_norm",
"pre_attention_layer_norm": "0/layer_norm", # previously 1, but seems wrong
"token_embedder": "shared",
"encoder_norm": "final_layer_norm",
"decoder_norm": "final_layer_norm",
"relpos_bias/rel_embedding": "block/0/layer/0/SelfAttention/relative_attention_bias/weight",
"router/router_weights/w/": "router/classifier/",
"roer/roer_weights/w/": "router/classifier/",
"logits_dense": "lm_head",
}
def UpperCAmelCase_ ( snake_case__ ) -> List[str]:
"""simple docstring"""
lowerCAmelCase__ = list(s_dict.keys() )
for key in keys:
lowerCAmelCase__ = R'.*/layers_(\d+)'
lowerCAmelCase__ = key
        if re.match(R'.*/layers_(\d+)' , key ):
            lowerCAmelCase__ = re.sub(R'layers_(\d+)' , R'block/\1/layer' , new_key )
        lowerCAmelCase__ = R'(encoder|decoder)\/'
        if re.match(R'(encoder|decoder)\/' , key ):
            lowerCAmelCase__ = re.match(R'(encoder|decoder)\/' , key ).groups()
            if groups[0] == "encoder":
                lowerCAmelCase__ = re.sub(R'/mlp/' , R'/1/mlp/' , new_key )
                lowerCAmelCase__ = re.sub(R'/pre_mlp_layer_norm/' , R'/1/layer_norm/' , new_key )
            elif groups[0] == "decoder":
                lowerCAmelCase__ = re.sub(R'/mlp/' , R'/2/mlp/' , new_key )
                lowerCAmelCase__ = re.sub(R'/pre_mlp_layer_norm/' , R'/2/layer_norm/' , new_key )
        # 2. Convert other classic mappings
        for old_key, temp_key in MOE_LAYER_NAME_MAPPING.items():
            if old_key in new_key:
                lowerCAmelCase__ = new_key.replace(old_key , temp_key )
        print(f'{key} -> {new_key}' )
        lowerCAmelCase__ = s_dict.pop(key )
if "encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ = s_dict[
'encoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
if "decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight" in s_dict:
lowerCAmelCase__ = s_dict[
'decoder/block/0/layer/0/SelfAttention/relative_attention_bias/weight'
].T
# 3. Take extra care of the EXPERTS layer
for key in list(s_dict.keys() ):
if "expert" in key:
lowerCAmelCase__ = s_dict[key].shape[0]
lowerCAmelCase__ = s_dict[key]
for idx in range(snake_case__ ):
lowerCAmelCase__ = expert_weihts[idx]
                print(f'{key} -> {key.replace("expert/" , "experts/expert_" + str(idx ) + "/" )}' )
            s_dict.pop(key )
return s_dict
_lowerCAmelCase : Union[str, Any] = {
"NUM_ENCODER_LAYERS": "num_layers",
"NUM_DECODER_LAYERS": "num_decoder_layers",
"NUM_HEADS": "num_heads",
"HEAD_DIM": "d_kv",
"EMBED_DIM": "d_model",
"MLP_DIM": "d_ff",
"NUM_SELECTED_EXPERTS": "num_selected_experts",
"NUM_ENCODER_SPARSE_LAYERS": "num_sparse_encoder_layers",
"NUM_DECODER_SPARSE_LAYERS": "num_sparse_decoder_layers",
"dense.MlpBlock.activations": "feed_forward_proj",
}
def UpperCAmelCase_ ( snake_case__ , snake_case__ ) -> List[str]:
"""simple docstring"""
import regex as re
with open(snake_case__ , 'r' ) as f:
lowerCAmelCase__ = f.read()
lowerCAmelCase__ = re.findall(R'(.*) = ([0-9.]*)' , snake_case__ )
lowerCAmelCase__ = {}
for param, value in regex_match:
if param in GIN_TO_CONFIG_MAPPING and value != "":
lowerCAmelCase__ = float(snake_case__ ) if '.' in value else int(snake_case__ )
lowerCAmelCase__ = re.findall(R'(.*activations) = \(\'(.*)\',\)' , snake_case__ )[0]
lowerCAmelCase__ = str(activation[1] )
lowerCAmelCase__ = num_experts
lowerCAmelCase__ = SwitchTransformersConfig(**snake_case__ )
return config
def UpperCAmelCase_ ( flax_checkpoint_path , config_name , gin_file=None , pytorch_dump_path="./" , num_experts=8 ) -> List[str]:
    """simple docstring"""
    print(f'Loading flax weights from : {flax_checkpoint_path}' )
    lowerCAmelCase__ = checkpoints.load_t5x_checkpoint(flax_checkpoint_path )
    if gin_file is not None:
        lowerCAmelCase__ = convert_gin_to_config(gin_file , num_experts )
    else:
        lowerCAmelCase__ = SwitchTransformersConfig.from_pretrained(config_name )
    lowerCAmelCase__ = SwitchTransformersForConditionalGeneration(config )
    lowerCAmelCase__ = flax_params['target']
    lowerCAmelCase__ = flatten_dict(flax_params , sep='/' )
    lowerCAmelCase__ = rename_keys(flax_params )
    lowerCAmelCase__ = unflatten_dict(flax_params , sep='/' )
    # Load the flax params in the PT model
    load_flax_weights_in_pytorch_model(pt_model , flax_params )
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    pt_model.save_pretrained(pytorch_dump_path )
if __name__ == "__main__":
_lowerCAmelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--switch_t5x_checkpoint_path",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained SwitchTransformers model. \nThis specifies the"
" model architecture. If not provided, a `gin_file` has to be provided."
),
)
parser.add_argument(
"--gin_file",
default=None,
type=str,
required=False,
help="Path to the gin config file. If not provided, a `config_file` has to be passed ",
)
parser.add_argument(
"--config_name", default=None, type=str, required=False, help="Config name of SwitchTransformers model."
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output pytorch model."
)
parser.add_argument("--num_experts", default=8, type=int, required=False, help="Number of experts")
_lowerCAmelCase : Optional[Any] = parser.parse_args()
convert_flax_checkpoint_to_pytorch(
        args.switch_t5x_checkpoint_path,
args.config_name,
args.gin_file,
args.pytorch_dump_folder_path,
args.num_experts,
)
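# Added usage sketch (hypothetical paths, illustration only):
# python convert_switch_transformers_checkpoint.py \
#     --switch_t5x_checkpoint_path /path/to/t5x/checkpoint \
#     --gin_file /path/to/operative_config.gin \
#     --pytorch_dump_folder_path ./switch-base-8 \
#     --num_experts 8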
| 715 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class __snake_case :
    def __init__( self ,parent ,batch_size=13 ,image_size=32 ,patch_size=2 ,num_channels=3 ,embed_dim=16 ,depths=[1, 2, 1] ,num_heads=[2, 2, 4] ,window_size=2 ,mlp_ratio=2.0 ,qkv_bias=True ,hidden_dropout_prob=0.0 ,attention_probs_dropout_prob=0.0 ,drop_path_rate=0.1 ,hidden_act="gelu" ,use_absolute_embeddings=False ,patch_norm=True ,initializer_range=0.02 ,layer_norm_eps=1e-5 ,is_training=True ,scope=None ,use_labels=True ,type_sequence_label_size=10 ,encoder_stride=8 ,):
"""simple docstring"""
lowerCAmelCase__ = parent
lowerCAmelCase__ = batch_size
lowerCAmelCase__ = image_size
lowerCAmelCase__ = patch_size
lowerCAmelCase__ = num_channels
lowerCAmelCase__ = embed_dim
lowerCAmelCase__ = depths
lowerCAmelCase__ = num_heads
lowerCAmelCase__ = window_size
lowerCAmelCase__ = mlp_ratio
lowerCAmelCase__ = qkv_bias
lowerCAmelCase__ = hidden_dropout_prob
lowerCAmelCase__ = attention_probs_dropout_prob
lowerCAmelCase__ = drop_path_rate
lowerCAmelCase__ = hidden_act
lowerCAmelCase__ = use_absolute_embeddings
lowerCAmelCase__ = patch_norm
lowerCAmelCase__ = layer_norm_eps
lowerCAmelCase__ = initializer_range
lowerCAmelCase__ = is_training
lowerCAmelCase__ = scope
lowerCAmelCase__ = use_labels
lowerCAmelCase__ = type_sequence_label_size
lowerCAmelCase__ = encoder_stride
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ = None
if self.use_labels:
lowerCAmelCase__ = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
lowerCAmelCase__ = self.get_config()
return config, pixel_values, labels
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return SwinvaConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaModel(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ )
lowerCAmelCase__ = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForMaskedImageModeling(config=a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ = 1
lowerCAmelCase__ = SwinvaForMaskedImageModeling(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ = model(a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, 1, self.image_size, self.image_size) )
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = self.type_sequence_label_size
lowerCAmelCase__ = SwinvaForImageClassification(a_ )
model.to(a_ )
model.eval()
lowerCAmelCase__ = model(a_ ,labels=a_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = config_and_inputs
lowerCAmelCase__ = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class __snake_case ( SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
SCREAMING_SNAKE_CASE__ = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaModelTester(self )
lowerCAmelCase__ = ConfigTester(self ,config_class=a_ ,embed_dim=37 )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a_ )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a_ )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
lowerCAmelCase__ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a_ ,nn.Linear ) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(a_ )
lowerCAmelCase__ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ = [*signature.parameters.keys()]
lowerCAmelCase__ = ['pixel_values']
self.assertListEqual(arg_names[:1] ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = True
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
lowerCAmelCase__ = False
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.attentions
lowerCAmelCase__ = len(self.model_tester.depths )
self.assertEqual(len(a_ ) ,a_ )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ = True
lowerCAmelCase__ = config.window_size**2
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a_ ) ,a_ )
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
lowerCAmelCase__ = len(a_ )
# Check attention is always last and order is fine
lowerCAmelCase__ = True
lowerCAmelCase__ = True
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
if hasattr(self.model_tester ,'num_hidden_states_types' ):
lowerCAmelCase__ = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ = 2
self.assertEqual(out_len + added_hidden_states ,len(a_ ) )
lowerCAmelCase__ = outputs.attentions
self.assertEqual(len(a_ ) ,a_ )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_heads[0], window_size_squared, window_size_squared] ,)
def SCREAMING_SNAKE_CASE_ ( self ,a_ ,a_ ,a_ ,a_ ):
"""simple docstring"""
lowerCAmelCase__ = model_class(a_ )
model.to(a_ )
model.eval()
with torch.no_grad():
lowerCAmelCase__ = model(**self._prepare_for_class(a_ ,a_ ) )
lowerCAmelCase__ = outputs.hidden_states
lowerCAmelCase__ = getattr(
self.model_tester ,'expected_num_hidden_layers' ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a_ ) ,a_ )
# Swinv2 has a different seq_length
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
lowerCAmelCase__ = outputs.reshaped_hidden_states
self.assertEqual(len(a_ ) ,a_ )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ = reshaped_hidden_states[0].shape
lowerCAmelCase__ = (
reshaped_hidden_states[0].view(a_ ,a_ ,height * width ).permute(0 ,2 ,1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = 3
lowerCAmelCase__ = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ = True
self.check_hidden_states_output(a_ ,a_ ,a_ ,(padded_height, padded_width) )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a_ )
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ = SwinvaModel.from_pretrained(a_ )
self.assertIsNotNone(a_ )
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ , lowerCAmelCase__ = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ = _config_zero_init(a_ )
for model_class in self.all_model_classes:
lowerCAmelCase__ = model_class(config=a_ )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() ,[0.0, 1.0] ,msg=f'Parameter {name} of model {model_class} seems not properly initialized' ,)
@require_vision
@require_torch
class __snake_case ( unittest.TestCase ):
@cached_property
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def SCREAMING_SNAKE_CASE_ ( self ):
"""simple docstring"""
lowerCAmelCase__ = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a_ )
lowerCAmelCase__ = self.default_image_processor
lowerCAmelCase__ = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ = image_processor(images=a_ ,return_tensors='pt' ).to(a_ )
# forward pass
with torch.no_grad():
lowerCAmelCase__ = model(**a_ )
# verify the logits
lowerCAmelCase__ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,a_ )
lowerCAmelCase__ = torch.tensor([-0.3947, -0.4306, 0.0026] ).to(a_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,a_ ,atol=1e-4 ) )
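# Added worked example (illustration only): with the tester defaults
# image_size=32, patch_size=2, embed_dim=16 and depths=[1, 2, 1], the model check
# above expects ((32 // 2) ** 2) // (4 ** 2) = 16 tokens of width 16 * 2 ** 2 = 64.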
| 604 | 0 |
'''simple docstring'''
def _lowercase ( lowerCamelCase__ : int ):
    if not isinstance(lowerCamelCase__, int ):
        raise TypeError("only integers accepted as input" )
    else:
        _a = str(abs(lowerCamelCase__ ) )
        num_transpositions = [list(_a ) for char in range(len(_a ) )]
        for index in range(len(_a ) ):
            num_transpositions[index].pop(index )
        return max(
            int("".join(transposition ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
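# Added usage sketch (illustration only): the function drops each digit in turn
# and keeps the maximum survivor, e.g. 123 -> max(23, 13, 12):
# >>> _lowercase(123)
# 23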
| 131 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Tuple:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def __lowerCAmelCase ( self ) -> List[Any]:
_a = 1
_a = 3
_a = (3_2, 3_2)
_a = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(snake_case_ )
return image
@property
def __lowerCAmelCase ( self ) -> int:
torch.manual_seed(0 )
_a = UNetaDConditionModel(
block_out_channels=(3_2, 3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=7 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , attention_head_dim=8 , use_linear_projection=snake_case_ , only_cross_attention=(True, True, False) , num_class_embeds=1_0_0 , )
return model
@property
def __lowerCAmelCase ( self ) -> List[Any]:
torch.manual_seed(0 )
_a = AutoencoderKL(
block_out_channels=[3_2, 3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def __lowerCAmelCase ( self ) -> Optional[int]:
torch.manual_seed(0 )
_a = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , hidden_act="gelu" , projection_dim=5_1_2 , )
return CLIPTextModel(snake_case_ )
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , return_dict=snake_case_ , )[0]
_a = image[0, -3:, -3:, -1]
_a = image_from_tuple[0, -3:, -3:, -1]
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
_a = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCAmelCase ( self ) -> int:
_a = "cpu" # ensure determinism for the device-dependent torch.Generator
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = sd_pipe(
2 * [prompt] , image=2 * [low_res_image] , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
assert image.shape[0] == 2
_a = torch.Generator(device=snake_case_ ).manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , num_images_per_prompt=2 , guidance_scale=6.0 , noise_level=2_0 , num_inference_steps=2 , output_type="np" , )
_a = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def __lowerCAmelCase ( self ) -> Any:
_a = self.dummy_cond_unet_upscale
_a = DDPMScheduler()
_a = DDIMScheduler(prediction_type="v_prediction" )
_a = self.dummy_vae
_a = self.dummy_text_encoder
_a = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
_a = self.dummy_image.cpu().permute(0 , 2 , 3 , 1 )[0]
_a = Image.fromarray(np.uinta(snake_case_ ) ).convert("RGB" ).resize((6_4, 6_4) )
# put models in fp16, except vae as it overflows in fp16
_a = unet.half()
_a = text_encoder.half()
# make sure here that pndm scheduler skips prk
_a = StableDiffusionUpscalePipeline(
unet=snake_case_ , low_res_scheduler=snake_case_ , scheduler=snake_case_ , vae=snake_case_ , text_encoder=snake_case_ , tokenizer=snake_case_ , max_noise_level=3_5_0 , )
_a = sd_pipe.to(snake_case_ )
sd_pipe.set_progress_bar_config(disable=snake_case_ )
_a = "A painting of a squirrel eating a burger"
_a = torch.manual_seed(0 )
_a = sd_pipe(
[prompt] , image=snake_case_ , generator=snake_case_ , num_inference_steps=2 , output_type="np" , ).images
_a = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class A ( unittest.TestCase ):
def __lowerCAmelCase ( self ) -> Optional[Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCAmelCase ( self ) -> Optional[int]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat.npy" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(snake_case_ )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 1E-3
def __lowerCAmelCase ( self ) -> Optional[Any]:
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"
"/upsampled_cat_fp16.npy" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(
            snake_case_ , torch_dtype=torch.float16 , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , output_type="np" , )
_a = output.images[0]
assert image.shape == (5_1_2, 5_1_2, 3)
assert np.abs(expected_image - image ).max() < 5E-1
def __lowerCAmelCase ( self ) -> Optional[Any]:
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
_a = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
"/sd2-upscale/low_res_cat.png" )
_a = "stabilityai/stable-diffusion-x4-upscaler"
_a = StableDiffusionUpscalePipeline.from_pretrained(
            snake_case_ , torch_dtype=torch.float16 , )
pipe.to(snake_case_ )
pipe.set_progress_bar_config(disable=snake_case_ )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
_a = "a cat sitting on a park bench"
_a = torch.manual_seed(0 )
_a = pipe(
prompt=snake_case_ , image=snake_case_ , generator=snake_case_ , num_inference_steps=5 , output_type="np" , )
_a = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 1_0**9
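# Added note (illustrative, not part of the original row): the x4 upscaler
# returns images whose side length is low_res_image.size[0] * 4, hence the
# 64 -> 256 expectation in the dummy tests and the (512, 512, 3) shape asserted
# for the 128-pixel low-res cat in the slow tests above.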
| 131 | 1 |
"""simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class __SCREAMING_SNAKE_CASE ( yaml.SafeLoader ):
def __SCREAMING_SNAKE_CASE ( self : Dict , __a : Tuple ) -> int:
_UpperCamelCase : Dict = [self.constructed_objects[key_node] for key_node, _ in node.value]
        _UpperCamelCase : Optional[Any] = [tuple(key ) if isinstance(key , list ) else key for key in keys]
        _UpperCamelCase : Optional[int] = Counter(keys )
_UpperCamelCase : Optional[Any] = [key for key in counter if counter[key] > 1]
if duplicate_keys:
raise TypeError(F'''Got duplicate yaml keys: {duplicate_keys}''' )
def __SCREAMING_SNAKE_CASE ( self : str , __a : Optional[int] , __a : List[str]=False ) -> Tuple:
_UpperCamelCase : str = super().construct_mapping(UpperCAmelCase_ , deep=UpperCAmelCase_ )
self._check_no_duplicates_on_constructed_node(UpperCAmelCase_ )
return mapping
def lowercase__ ( readme_content ) -> int:
"""simple docstring"""
_UpperCamelCase : Tuple = list(readme_content.splitlines() )
if full_content and full_content[0] == "---" and "---" in full_content[1:]:
_UpperCamelCase : Union[str, Any] = full_content[1:].index("---" ) + 1
_UpperCamelCase : Dict = "\n".join(full_content[1:sep_idx] )
return yamlblock, "\n".join(full_content[sep_idx + 1 :] )
return None, "\n".join(_snake_case )
class __SCREAMING_SNAKE_CASE ( __UpperCAmelCase ):
SCREAMING_SNAKE_CASE__ :Any = {"""train_eval_index"""} # train-eval-index in the YAML metadata
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : Tuple , __a : Optional[int] ) -> Union[str, Any]:
with open(UpperCAmelCase_ , encoding="utf-8" ) as readme_file:
            _UpperCamelCase, _UpperCamelCase = _split_yaml_from_readme(readme_file.read() )
if yaml_string is not None:
return cls.from_yaml_string(UpperCAmelCase_ )
else:
return cls()
def __SCREAMING_SNAKE_CASE ( self : str , __a : Any ) -> Dict:
if path.exists():
with open(UpperCAmelCase_ , encoding="utf-8" ) as readme_file:
_UpperCamelCase : List[str] = readme_file.read()
else:
_UpperCamelCase : Dict = None
_UpperCamelCase : str = self._to_readme(UpperCAmelCase_ )
with open(UpperCAmelCase_ , "w" , encoding="utf-8" ) as readme_file:
readme_file.write(UpperCAmelCase_ )
def __SCREAMING_SNAKE_CASE ( self : Union[str, Any] , __a : List[Any] = None ) -> Optional[int]:
if readme_content is not None:
            _UpperCamelCase, _UpperCamelCase = _split_yaml_from_readme(UpperCAmelCase_ )
_UpperCamelCase : Optional[Any] = "---\n" + self.to_yaml_string() + "---\n" + content
else:
_UpperCamelCase : Union[str, Any] = "---\n" + self.to_yaml_string() + "---\n"
return full_content
@classmethod
def __SCREAMING_SNAKE_CASE ( cls : List[Any] , __a : List[Any] ) -> int:
_UpperCamelCase : List[str] = yaml.load(UpperCAmelCase_ , Loader=_NoDuplicateSafeLoader ) or {}
# Convert the YAML keys to DatasetMetadata fields
_UpperCamelCase : Union[str, Any] = {
(key.replace("-" , "_" ) if key.replace("-" , "_" ) in cls._FIELDS_WITH_DASHES else key): value
for key, value in metadata_dict.items()
}
return cls(**UpperCAmelCase_ )
def __SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
return yaml.safe_dump(
{
(key.replace("_" , "-" ) if key in self._FIELDS_WITH_DASHES else key): value
for key, value in self.items()
} , sort_keys=UpperCAmelCase_ , allow_unicode=UpperCAmelCase_ , encoding="utf-8" , ).decode("utf-8" )
lowerCamelCase__ = {
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
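
# Editor's illustrative sketch (not part of the original module): a minimal
# round trip using the class above. The README path and the `pretty_name`
# field are hypothetical and only serve to show the API shape.
def _example_metadata_roundtrip(readme_filepath):
    metadata = DatasetMetadata.from_readme(readme_filepath)
    metadata["pretty_name"] = "My Dataset"  # assumed field, for illustration only
    metadata.to_readme(readme_filepath)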
if __name__ == "__main__":
    from argparse import ArgumentParser

    ap = ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
    ap.add_argument("readme_filepath")
    args = ap.parse_args()

    readme_filepath = Path(args.readme_filepath)
    dataset_metadata = DatasetMetadata.from_readme(readme_filepath)
    print(dataset_metadata)
    dataset_metadata.to_readme(readme_filepath)
| 711 |
"""simple docstring"""
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class Summarization(TaskTemplate):
    # `task` is not a ClassVar since we want it to be part of the `asdict` output for JSON serialization
    task: str = field(default="summarization", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"summary": Value("string")})
    text_column: str = "text"
    summary_column: str = "summary"

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {self.text_column: "text", self.summary_column: "summary"}
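
# Editor's illustrative sketch (not part of the original file): mapping
# non-standard column names onto the template's canonical schema. The column
# names below are hypothetical.
def _example_template():
    template = Summarization(text_column="article", summary_column="highlights")
    assert template.column_mapping == {"article": "text", "highlights": "summary"}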
| 51 | 0 |
"""simple docstring"""
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
    from transformers import Speech2TextFeatureExtractor

global_rng = random.Random()


def floats_list(shape, scale=1.0, rng=None, name=None):
    """Create a random float32-compatible tensor (as nested lists) of the given shape."""
    if rng is None:
        rng = global_rng

    values = []
    for batch_idx in range(shape[0]):
        values.append([])
        for _ in range(shape[1]):
            values[-1].append(rng.random() * scale)

    return values
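
# Editor's note (not part of the original tests): a quick sanity check of the
# helper above; the shape and scale are arbitrary.
def _example_floats_list():
    batch = floats_list((2, 5), scale=0.5)
    assert len(batch) == 2 and len(batch[0]) == 5
    assert all(0.0 <= v < 0.5 for row in batch for v in row)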
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        min_seq_length=400,
        max_seq_length=2_000,
        feature_size=24,
        num_mel_bins=24,
        padding_value=0.0,
        sampling_rate=16_000,
        return_attention_mask=True,
        do_normalize=True,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.feature_size = feature_size
        self.num_mel_bins = num_mel_bins
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize

    def prepare_feat_extract_dict(self):
        return {
            "feature_size": self.feature_size,
            "num_mel_bins": self.num_mel_bins,
            "padding_value": self.padding_value,
            "sampling_rate": self.sampling_rate,
            "return_attention_mask": self.return_attention_mask,
            "do_normalize": self.do_normalize,
        }

    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class Speech2TextFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
    feature_extraction_class = Speech2TextFeatureExtractor if is_speech_available() else None

    def setUp(self):
        self.feat_extract_tester = Speech2TextFeatureExtractionTester(self)

    def _check_zero_mean_unit_variance(self, input_vector):
        self.assertTrue(np.all(np.mean(input_vector, axis=0) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(input_vector, axis=0) - 1) < 1e-3))
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding=True, return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_cepstral_mean_and_variance_normalization(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, padding=padding, max_length=max_length, return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_np(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]

        paddings = ["longest", "max_length", "do_not_pad"]
        max_lengths = [None, 16, None]
        for max_length, padding in zip(max_lengths, paddings):
            inputs = feature_extractor(
                speech_inputs, max_length=max_length, padding=padding, return_tensors="np", return_attention_mask=True
            )
            input_features = inputs.input_features
            attention_mask = inputs.attention_mask
            fbank_feat_lengths = [np.sum(x) for x in attention_mask]

            self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]])
            self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]])
            self.assertTrue(input_features[0][fbank_feat_lengths[1] :].sum() < 1e-6)
            self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]])
    def test_cepstral_mean_and_variance_normalization_trunc_max_length(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="max_length",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1])
        self._check_zero_mean_unit_variance(input_features[2])
    def test_cepstral_mean_and_variance_normalization_trunc_longest(self):
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=4,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length < longest -> then pad to max_length
        self.assertEqual(input_features.shape, (3, 4, 24))

        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1_400, 200)]
        inputs = feature_extractor(
            speech_inputs,
            padding="longest",
            max_length=16,
            truncation=True,
            return_tensors="np",
            return_attention_mask=True,
        )
        input_features = inputs.input_features
        attention_mask = inputs.attention_mask
        fbank_feat_lengths = np.sum(attention_mask == 1, axis=1)

        self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]])
        self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]])
        self._check_zero_mean_unit_variance(input_features[2])

        # make sure that if max_length > longest -> then pad to longest
        self.assertEqual(input_features.shape, (3, 6, 24))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]
        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        expected = np.array([
            -1.5745, -1.7713, -1.7020, -1.6069, -1.2250, -1.1105, -0.9072, -0.8241,
            -1.2310, -0.8098, -0.3320, -0.4101, -0.7985, -0.4996, -0.8213, -0.9128,
            -1.0420, -1.1286, -1.0440, -0.7999, -0.8405, -1.2275, -1.5443, -1.4625,
        ])
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 584, 24))
        self.assertTrue(np.allclose(input_features[0, 0, :30], expected, atol=1e-4))
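
# Editor's illustrative sketch (not part of the original test file): the
# utterance-level normalization these tests verify, written out standalone.
# `frames` is assumed to be a (time, num_mel_bins) float array.
def _example_utterance_cmvn(frames):
    mean = frames.mean(axis=0)
    std = frames.std(axis=0)
    return (frames - mean) / (std + 1e-7)  # epsilon guards against silent bins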
| 388 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)


def get_mobilevit_config(mobilevit_name):
    config = MobileViTConfig()

    # size of the architecture
    if "mobilevit_s" in mobilevit_name:
        config.hidden_sizes = [144, 192, 240]
        config.neck_hidden_sizes = [16, 32, 64, 96, 128, 160, 640]
    elif "mobilevit_xs" in mobilevit_name:
        config.hidden_sizes = [96, 120, 144]
        config.neck_hidden_sizes = [16, 32, 48, 64, 80, 96, 384]
    elif "mobilevit_xxs" in mobilevit_name:
        config.hidden_sizes = [64, 80, 96]
        config.neck_hidden_sizes = [16, 16, 24, 48, 64, 80, 320]
        config.hidden_dropout_prob = 0.05
        config.expand_ratio = 2.0

    if mobilevit_name.startswith("deeplabv3_"):
        config.image_size = 512
        config.output_stride = 16
        config.num_labels = 21
        filename = "pascal-voc-id2label.json"
    else:
        config.num_labels = 1_000
        filename = "imagenet-1k-id2label.json"

    repo_id = "huggingface/label-files"
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    return config
def rename_key(name, base_model=False):
    for i in range(1, 6):
        if f"layer_{i}." in name:
            name = name.replace(f"layer_{i}.", f"encoder.layer.{i - 1}.")

    if "conv_1." in name:
        name = name.replace("conv_1.", "conv_stem.")
    if ".block." in name:
        name = name.replace(".block.", ".")
    if "exp_1x1" in name:
        name = name.replace("exp_1x1", "expand_1x1")
    if "red_1x1" in name:
        name = name.replace("red_1x1", "reduce_1x1")
    if ".local_rep.conv_3x3." in name:
        name = name.replace(".local_rep.conv_3x3.", ".conv_kxk.")
    if ".local_rep.conv_1x1." in name:
        name = name.replace(".local_rep.conv_1x1.", ".conv_1x1.")
    if ".norm." in name:
        name = name.replace(".norm.", ".normalization.")
    if ".conv." in name:
        name = name.replace(".conv.", ".convolution.")
    if ".conv_proj." in name:
        name = name.replace(".conv_proj.", ".conv_projection.")

    for i in range(0, 2):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.layer.{j}.")

    for i in range(2, 6):
        for j in range(0, 4):
            if f".{i}.{j}." in name:
                name = name.replace(f".{i}.{j}.", f".{i}.")
                if "expand_1x1" in name:
                    name = name.replace("expand_1x1", "downsampling_layer.expand_1x1")
                if "conv_3x3" in name:
                    name = name.replace("conv_3x3", "downsampling_layer.conv_3x3")
                if "reduce_1x1" in name:
                    name = name.replace("reduce_1x1", "downsampling_layer.reduce_1x1")

    for i in range(2, 5):
        if f".global_rep.{i}.weight" in name:
            name = name.replace(f".global_rep.{i}.weight", ".layernorm.weight")
        if f".global_rep.{i}.bias" in name:
            name = name.replace(f".global_rep.{i}.bias", ".layernorm.bias")

    if ".global_rep." in name:
        name = name.replace(".global_rep.", ".transformer.")
    if ".pre_norm_mha.0." in name:
        name = name.replace(".pre_norm_mha.0.", ".layernorm_before.")
    if ".pre_norm_mha.1.out_proj." in name:
        name = name.replace(".pre_norm_mha.1.out_proj.", ".attention.output.dense.")
    if ".pre_norm_ffn.0." in name:
        name = name.replace(".pre_norm_ffn.0.", ".layernorm_after.")
    if ".pre_norm_ffn.1." in name:
        name = name.replace(".pre_norm_ffn.1.", ".intermediate.dense.")
    if ".pre_norm_ffn.4." in name:
        name = name.replace(".pre_norm_ffn.4.", ".output.dense.")
    if ".transformer." in name:
        name = name.replace(".transformer.", ".transformer.layer.")
    if ".aspp_layer." in name:
        name = name.replace(".aspp_layer.", ".")
    if ".aspp_pool." in name:
        name = name.replace(".aspp_pool.", ".")
    if "seg_head." in name:
        name = name.replace("seg_head.", "segmentation_head.")
    if "segmentation_head.classifier.classifier." in name:
        name = name.replace("segmentation_head.classifier.classifier.", "segmentation_head.classifier.")

    if "classifier.fc." in name:
        name = name.replace("classifier.fc.", "classifier.")
    elif (not base_model) and ("segmentation_head." not in name):
        name = "mobilevit." + name

    return name
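
# Editor's illustrative sketch (not part of the original script): what the
# rename pass does to one checkpoint key. The sample key is hypothetical.
def _example_rename():
    assert rename_key("conv_1.conv.weight") == "mobilevit.conv_stem.convolution.weight"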
def convert_state_dict(orig_state_dict, model, base_model=False):
    if base_model:
        model_prefix = ""
    else:
        model_prefix = "mobilevit."

    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if key[:8] == "encoder.":
            key = key[8:]

        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[0][6:]) - 1
            transformer_num = int(key_split[3])
            layer = model.get_submodule(f"{model_prefix}encoder.layer.{layer_num}")
            dim = layer.transformer.layer[transformer_num].attention.attention.all_head_size
            prefix = (
                f"{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."
            )
            if "weight" in key:
                orig_state_dict[prefix + "query.weight"] = val[:dim, :]
                orig_state_dict[prefix + "key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[prefix + "value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[prefix + "query.bias"] = val[:dim]
                orig_state_dict[prefix + "key.bias"] = val[dim : dim * 2]
                orig_state_dict[prefix + "value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key, base_model)] = val

    return orig_state_dict
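
# Editor's illustrative sketch (not part of the original script): the fused-QKV
# split performed above, shown on its own. `dim` is the per-projection width.
def _example_qkv_split(qkv_weight, dim):
    query = qkv_weight[:dim, :]
    key = qkv_weight[dim : dim * 2, :]
    value = qkv_weight[-dim:, :]
    return query, key, value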
# We will verify our results on an image of cute cats
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
    """Copy/paste/tweak the original model's weights to our MobileViT structure."""
    config = get_mobilevit_config(mobilevit_name)

    # load original state_dict
    state_dict = torch.load(checkpoint_path, map_location="cpu")

    # load 🤗 model
    if mobilevit_name.startswith("deeplabv3_"):
        model = MobileViTForSemanticSegmentation(config).eval()
    else:
        model = MobileViTForImageClassification(config).eval()

    new_state_dict = convert_state_dict(state_dict, model)
    model.load_state_dict(new_state_dict)

    # Check outputs on an image, prepared by MobileViTImageProcessor
    image_processor = MobileViTImageProcessor(crop_size=config.image_size, size=config.image_size + 32)
    encoding = image_processor(images=prepare_img(), return_tensors="pt")
    outputs = model(**encoding)
    logits = outputs.logits
    if mobilevit_name.startswith("deeplabv3_"):
        assert logits.shape == (1, 21, 32, 32)

        if mobilevit_name == "deeplabv3_mobilevit_s":
            expected_logits = torch.tensor(
                [
                    [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                    [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                    [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xs":
            expected_logits = torch.tensor(
                [
                    [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                    [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                    [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
                ]
            )
        elif mobilevit_name == "deeplabv3_mobilevit_xxs":
            expected_logits = torch.tensor(
                [
                    [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                    [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                    [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
                ]
            )
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3, :3, :3], expected_logits, atol=1e-4)
    else:
        assert logits.shape == (1, 1_000)

        if mobilevit_name == "mobilevit_s":
            expected_logits = torch.tensor([-0.9866, 0.2392, -1.1241])
        elif mobilevit_name == "mobilevit_xs":
            expected_logits = torch.tensor([-2.4761, -0.9399, -1.9587])
        elif mobilevit_name == "mobilevit_xxs":
            expected_logits = torch.tensor([-1.9364, -1.2327, -0.4653])
        else:
            raise ValueError(f"Unknown mobilevit_name: {mobilevit_name}")

        assert torch.allclose(logits[0, :3], expected_logits, atol=1e-4)

    Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
    print(f"Saving model {mobilevit_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)
    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    if push_to_hub:
        model_mapping = {
            "mobilevit_s": "mobilevit-small",
            "mobilevit_xs": "mobilevit-x-small",
            "mobilevit_xxs": "mobilevit-xx-small",
            "deeplabv3_mobilevit_s": "deeplabv3-mobilevit-small",
            "deeplabv3_mobilevit_xs": "deeplabv3-mobilevit-x-small",
            "deeplabv3_mobilevit_xxs": "deeplabv3-mobilevit-xx-small",
        }

        print("Pushing to the hub...")
        model_name = model_mapping[mobilevit_name]
        image_processor.push_to_hub(model_name, organization="apple")
        model.push_to_hub(model_name, organization="apple")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--mobilevit_name",
        default="mobilevit_s",
        type=str,
        help=(
            "Name of the MobileViT model you'd like to convert. Should be one of 'mobilevit_s', 'mobilevit_xs',"
            " 'mobilevit_xxs', 'deeplabv3_mobilevit_s', 'deeplabv3_mobilevit_xs', 'deeplabv3_mobilevit_xxs'."
        ),
    )
    parser.add_argument(
        "--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
    )
    parser.add_argument(
        "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
    )

    args = parser.parse_args()
    convert_movilevit_checkpoint(
        args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
    )
| 388 | 1 |
from __future__ import annotations
def all_unique(input_list: list[int]) -> bool:
    """Return True if every element in the list is distinct.

    >>> all_unique([1, 2, 3, 4])
    True
    >>> all_unique([1, 2, 3, 1])
    False
    """
    return len(set(input_list)) == len(input_list)
if __name__ == "__main__":
import doctest
doctest.testmod()
| 708 |
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel
from ...schedulers import KarrasVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class KarrasVePipeline(DiffusionPipeline):
    """Pipeline for unconditional image generation with the stochastic sampler of Karras et al. (2022)."""

    unet: UNet2DModel
    scheduler: KarrasVeScheduler

    def __init__(self, unet: UNet2DModel, scheduler: KarrasVeScheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        num_inference_steps: int = 50,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ):
        img_size = self.unet.config.sample_size
        shape = (batch_size, 3, img_size, img_size)

        model = self.unet

        # sample x_0 ~ N(0, sigma_0^2 * I)
        sample = randn_tensor(shape, generator=generator, device=self.device) * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # here sigma_t == t_i from the paper
            sigma = self.scheduler.schedule[t]
            sigma_prev = self.scheduler.schedule[t - 1] if t > 0 else 0

            # 1. Select temporarily increased noise level sigma_hat
            # 2. Add new noise to move from sample_i to sample_hat
            sample_hat, sigma_hat = self.scheduler.add_noise_to_input(sample, sigma, generator=generator)

            # 3. Predict the noise residual given the noise magnitude `sigma_hat`
            # The model inputs and output are adjusted by following eq. (213) in [1].
            model_output = (sigma_hat / 2) * model((sample_hat + 1) / 2, sigma_hat / 2).sample

            # 4. Evaluate dx/dt at sigma_hat
            # 5. Take Euler step from sigma to sigma_prev
            step_output = self.scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)

            if sigma_prev != 0:
                # 6. Apply 2nd order correction
                # The model inputs and output are adjusted by following eq. (213) in [1].
                model_output = (sigma_prev / 2) * model((step_output.prev_sample + 1) / 2, sigma_prev / 2).sample
                step_output = self.scheduler.step_correct(
                    model_output,
                    sigma_hat,
                    sigma_prev,
                    sample_hat,
                    step_output.prev_sample,
                    step_output["derivative"],
                )
            sample = step_output.prev_sample

        sample = (sample / 2 + 0.5).clamp(0, 1)
        image = sample.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
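
# Editor's illustrative sketch (not part of the original module): running the
# pipeline end to end. The checkpoint id below is hypothetical; any compatible
# UNet2DModel/KarrasVeScheduler pair would work.
def _example_sampling():
    pipe = KarrasVePipeline.from_pretrained("some-org/karras-ve-checkpoint")
    image = pipe(batch_size=1, num_inference_steps=50).images[0]
    image.save("sample.png")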
| 557 | 0 |
"""simple docstring"""
import csv
import tweepy
# Twitter API credentials
consumer_key = ""
consumer_secret = ""
access_key = ""
access_secret = ""


def get_all_tweets(screen_name: str) -> None:
    # authorize twitter, initialize tweepy
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)

    # initialize a list to hold all the tweepy Tweets
    alltweets = []

    # make initial request for most recent tweets (200 is the maximum allowed count)
    new_tweets = api.user_timeline(screen_name=screen_name, count=200)
    # save most recent tweets
    alltweets.extend(new_tweets)
    # save the id of the oldest tweet less one
    oldest = alltweets[-1].id - 1

    # keep grabbing tweets until there are no tweets left to grab
    while len(new_tweets) > 0:
        print(f"getting tweets before {oldest}")

        # all subsequent requests use the max_id param to prevent duplicates
        new_tweets = api.user_timeline(screen_name=screen_name, count=200, max_id=oldest)
        # save most recent tweets
        alltweets.extend(new_tweets)
        # update the id of the oldest tweet less one
        oldest = alltweets[-1].id - 1
        print(f"...{len(alltweets)} tweets downloaded so far")

    # transform the tweepy tweets into a 2D array that will populate the csv
    outtweets = [[tweet.id_str, tweet.created_at, tweet.text] for tweet in alltweets]

    # write the csv
    with open(f"new_{screen_name}_tweets.csv", "w") as f:
        writer = csv.writer(f)
        writer.writerow(["id", "created_at", "text"])
        writer.writerows(outtweets)
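
# Editor's note (not part of the original script): tweepy also ships a
# pagination helper that replaces the manual max_id loop above; a minimal
# sketch using the same credentials:
def get_all_tweets_with_cursor(screen_name: str) -> list:
    auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_key, access_secret)
    api = tweepy.API(auth)
    return [status for status in tweepy.Cursor(api.user_timeline, screen_name=screen_name, count=200).items()]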
if __name__ == "__main__":
# pass in the username of the account you want to download
get_all_tweets("""FirePing32""")
| 93 |
'''simple docstring'''
import argparse
import logging
from collections import namedtuple
import torch
from model_bertabs import BertAbsSummarizer
from models.model_builder import AbsSummarizer # The authors' implementation
from transformers import BertTokenizer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

SAMPLE_TEXT = "Hello world! cécé herlolip"

BertAbsConfig = namedtuple(
    "BertAbsConfig",
[
"temp_dir",
"large",
"use_bert_emb",
"finetune_bert",
"encoder",
"share_emb",
"max_pos",
"enc_layers",
"enc_hidden_size",
"enc_heads",
"enc_ff_size",
"enc_dropout",
"dec_layers",
"dec_hidden_size",
"dec_heads",
"dec_ff_size",
"dec_dropout",
],
)
def convert_bertabs_checkpoints(path_to_checkpoints, dump_path):
    """Copy/paste and tweak the pre-trained weights provided by the creators
    of BertAbs for the internal architecture of the 🤗 version.
    """
    # Instantiate the authors' model with the pre-trained weights
    # (boolean flags below follow the published conversion script)
    config = BertAbsConfig(
        temp_dir=".",
        finetune_bert=False,
        large=False,
        share_emb=True,
        use_bert_emb=False,
        encoder="bert",
        max_pos=512,
        enc_layers=6,
        enc_hidden_size=512,
        enc_heads=8,
        enc_ff_size=512,
        enc_dropout=0.2,
        dec_layers=6,
        dec_hidden_size=768,
        dec_heads=8,
        dec_ff_size=2048,
        dec_dropout=0.2,
    )
    checkpoints = torch.load(path_to_checkpoints, lambda storage, loc: storage)
    original = AbsSummarizer(config, torch.device("cpu"), checkpoints)
    original.eval()

    new_model = BertAbsSummarizer(config, torch.device("cpu"))
    new_model.eval()

    # -------------------
    # Convert the weights
    # -------------------

    logging.info("convert the model")
    new_model.bert.load_state_dict(original.bert.state_dict())
    new_model.decoder.load_state_dict(original.decoder.state_dict())
    new_model.generator.load_state_dict(original.generator.state_dict())

    # -----------------------------------
    # Make sure the outputs are identical
    # -----------------------------------

    logging.info("Make sure that the models' outputs are identical")
    tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

    # prepare the model inputs
    encoder_input_ids = tokenizer.encode("This is sample éàalj'-.")
    encoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(encoder_input_ids)))
    encoder_input_ids = torch.tensor(encoder_input_ids).unsqueeze(0)
    decoder_input_ids = tokenizer.encode("This is sample 3 éàalj'-.")
    decoder_input_ids.extend([tokenizer.pad_token_id] * (512 - len(decoder_input_ids)))
    decoder_input_ids = torch.tensor(decoder_input_ids).unsqueeze(0)

    # failsafe to make sure the weights reset does not affect the
    # loaded weights.
    assert torch.max(torch.abs(original.generator[0].weight - new_model.generator[0].weight)) == 0

    # forward pass
    src = encoder_input_ids
    tgt = decoder_input_ids
    segs = token_type_ids = None
    clss = None
    mask_src = encoder_attention_mask = None
    mask_tgt = decoder_attention_mask = None
    mask_cls = None

    # The original model does not apply the generator layer immediately but rather in
    # the beam search (where it combines softmax + linear layer). Since we already
    # apply the softmax in our generation process we only apply the linear layer here.
    # We make sure that the outputs of the full stack are identical.
    output_original_model = original(src, tgt, segs, clss, mask_src, mask_tgt, mask_cls)[0]
    output_original_generator = original.generator(output_original_model)

    output_converted_model = new_model(
        encoder_input_ids, decoder_input_ids, token_type_ids, encoder_attention_mask, decoder_attention_mask
    )[0]
    output_converted_generator = new_model.generator(output_converted_model)

    maximum_absolute_difference = torch.max(torch.abs(output_converted_model - output_original_model)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))
    maximum_absolute_difference = torch.max(torch.abs(output_converted_generator - output_original_generator)).item()
    print("Maximum absolute difference between weights: {:.2f}".format(maximum_absolute_difference))

    are_identical = torch.allclose(output_converted_model, output_original_model, atol=1e-3)
    if are_identical:
        logging.info("all weights are equal up to 1e-3")
    else:
        raise ValueError("the weights are different. The new model is likely different from the original one.")

    # The model has been saved with torch.save(model) and this is bound to the exact
    # directory structure. We save the state_dict instead.
    logging.info("saving the model's state dictionary")
    torch.save(
        new_model.state_dict(), "./bertabs-finetuned-cnndm-extractive-abstractive-summarization/pytorch_model.bin"
    )
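
# Editor's illustrative sketch (not part of the original script): the output
# comparison the conversion relies on, factored out. atol=1e-3 mirrors the
# tolerance used above.
def _example_outputs_match(a, b, atol=1e-3):
    print("max abs diff: {:.6f}".format(torch.max(torch.abs(a - b)).item()))
    return torch.allclose(a, b, atol=atol)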
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--bertabs_checkpoint_path",
default=None,
type=str,
required=True,
help="Path the official PyTorch dump.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=str,
required=True,
help="Path to the output PyTorch model.",
)
    args = parser.parse_args()
convert_bertabs_checkpoints(
args.bertabs_checkpoint_path,
args.pytorch_dump_folder_path,
)
| 284 | 0 |
"""simple docstring"""
def fizz_buzz(number: int, iterations: int) -> str:
    """Play FizzBuzz starting at `number` for rounds up to `iterations`.

    >>> fizz_buzz(1, 7)
    '1 2 Fizz 4 Buzz Fizz 7 '
    """
    if not isinstance(iterations, int):
        raise ValueError("iterations must be defined as integers")
    if not isinstance(number, int) or not number >= 1:
        raise ValueError("starting number must be an integer and be more than 0")
    if not iterations >= 1:
        raise ValueError("Iterations must be done more than 0 times to play FizzBuzz")

    out = ""
    while number <= iterations:
        if number % 3 == 0:
            out += "Fizz"
        if number % 5 == 0:
            out += "Buzz"
        if 0 not in (number % 3, number % 5):
            out += str(number)

        # print(out)
        number += 1
        out += " "
    return out
if __name__ == "__main__":
import doctest
doctest.testmod()
| 28 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class RoFormerConfig(PretrainedConfig):
    model_type = "roformer"

    def __init__(
        self,
        vocab_size=50_000,
        embedding_size=None,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=1_536,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        rotary_value=False,
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.embedding_size = hidden_size if embedding_size is None else embedding_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.rotary_value = rotary_value
        self.use_cache = use_cache
class RoFormerOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        )
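
# Editor's illustrative sketch (not part of the original module): instantiating
# the config and inspecting the ONNX input axes. All values are the defaults above.
def _example_config():
    config = RoFormerConfig(rotary_value=True)
    onnx_config = RoFormerOnnxConfig(config)
    print(onnx_config.inputs)  # axes for input_ids / attention_mask / token_type_ids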
| 28 | 1 |
"""simple docstring"""
import argparse
import requests
import torch
from PIL import Image
from transformers import ViTMAEConfig, ViTMAEForPreTraining, ViTMAEImageProcessor
def rename_key(name):
    if "cls_token" in name:
        name = name.replace("cls_token", "vit.embeddings.cls_token")
    if "mask_token" in name:
        name = name.replace("mask_token", "decoder.mask_token")
    if "decoder_pos_embed" in name:
        name = name.replace("decoder_pos_embed", "decoder.decoder_pos_embed")
    if "pos_embed" in name and "decoder" not in name:
        name = name.replace("pos_embed", "vit.embeddings.position_embeddings")
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "vit.embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "vit.embeddings.norm")
    if "decoder_blocks" in name:
        name = name.replace("decoder_blocks", "decoder.decoder_layers")
    if "blocks" in name:
        name = name.replace("blocks", "vit.encoder.layer")
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "decoder_embed" in name:
        name = name.replace("decoder_embed", "decoder.decoder_embed")
    if "decoder_norm" in name:
        name = name.replace("decoder_norm", "decoder.decoder_norm")
    if "decoder_pred" in name:
        name = name.replace("decoder_pred", "decoder.decoder_pred")
    if "norm.weight" in name and "decoder" not in name:
        name = name.replace("norm.weight", "vit.layernorm.weight")
    if "norm.bias" in name and "decoder" not in name:
        name = name.replace("norm.bias", "vit.layernorm.bias")
    return name
def convert_state_dict(orig_state_dict, config):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
        if "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            if "decoder_blocks" in key:
                dim = config.decoder_hidden_size
                prefix = "decoder.decoder_layers."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
            else:
                dim = config.hidden_size
                prefix = "vit.encoder.layer."
                if "weight" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.weight"] = val[:dim, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.weight"] = val[dim : dim * 2, :]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.weight"] = val[-dim:, :]
                elif "bias" in key:
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.query.bias"] = val[:dim]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.key.bias"] = val[dim : dim * 2]
                    orig_state_dict[f"{prefix}{layer_num}.attention.attention.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val
    return orig_state_dict
def convert_vit_mae_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    config = ViTMAEConfig()
    if "large" in checkpoint_url:
        config.hidden_size = 1024
        config.intermediate_size = 4096
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
    elif "huge" in checkpoint_url:
        config.patch_size = 14
        config.hidden_size = 1280
        config.intermediate_size = 5120
        config.num_hidden_layers = 32
        config.num_attention_heads = 16

    model = ViTMAEForPreTraining(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["model"]

    new_state_dict = convert_state_dict(state_dict, config)

    model.load_state_dict(new_state_dict)
    model.eval()

    url = "https://user-images.githubusercontent.com/11435359/147738734-196fd92f-9260-48d5-ba7e-bf103d29364d.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTMAEImageProcessor(size=config.image_size)
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    logits = outputs.logits

    if "large" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-0.7309, -0.7128, -1.0169], [-1.0161, -0.9058, -1.1878], [-1.0478, -0.9411, -1.1911]]
        )
    elif "huge" in checkpoint_url:
        expected_slice = torch.tensor(
            [[-1.1599, -0.9199, -1.2221], [-1.1952, -0.9269, -1.2307], [-1.2143, -0.9337, -1.2262]]
        )
    else:
        expected_slice = torch.tensor(
            [[-0.9192, -0.8481, -1.1259], [-1.1349, -1.0034, -1.2599], [-1.1757, -1.0429, -1.2726]]
        )

    # verify logits
    assert torch.allclose(logits[0, :3, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
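
# Editor's illustrative sketch (not part of the original script): why the
# conversion seeds torch before the forward pass. ViTMAE masks patches by
# ranking random noise, so logits are only reproducible under a fixed seed.
# A minimal version of that shuffle (mask_ratio and shapes are illustrative):
def _example_random_masking(sequence, mask_ratio=0.75):
    batch, seq_len, _ = sequence.shape
    len_keep = int(seq_len * (1 - mask_ratio))
    noise = torch.rand(batch, seq_len)         # uniform noise per patch
    ids_shuffle = torch.argsort(noise, dim=1)  # ascending: small noise is kept
    ids_keep = ids_shuffle[:, :len_keep]
    return torch.gather(sequence, 1, ids_keep.unsqueeze(-1).expand(-1, -1, sequence.shape[-1]))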
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/mae/visualize/mae_visualize_vit_base.pth",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_mae_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
 | 34 |
'''simple docstring'''
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {'vocab_file': 'spiece.model'}

PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'TsinghuaAI/CPM-Generate': 'https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model',
}
}
class CpmTokenizer(PreTrainedTokenizer):
    """Runs pre-tokenization with the Jieba segmentation tool. It is used in CPM models."""

    def __init__(
        self,
        vocab_file,
        do_lower_case=False,
        remove_space=True,
        keep_accents=False,
        bos_token="<s>",
        eos_token="</s>",
        unk_token="<unk>",
        sep_token="<sep>",
        pad_token="<pad>",
        cls_token="<cls>",
        mask_token="<mask>",
        additional_special_tokens=["<eop>", "<eod>"],
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ):
        # Mask token behaves like a normal word, i.e. includes the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs

        super().__init__(
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )

        self._pad_token_type_id = 3
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

        try:
            import jieba
        except ModuleNotFoundError as error:
            raise error.__class__(
                'You need to install jieba to use CpmTokenizer or CpmTokenizerFast. '
                'See https://pypi.org/project/jieba/ for installation.'
            )
        self.jieba = jieba
        self.translator = str.maketrans(' \n', '\u2582\u2583')
    @property
    # Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
    def vocab_size(self):
        return len(self.sp_model)

    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d

        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}

        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def preprocess_text(self, inputs):
        if self.remove_space:
            outputs = ' '.join(inputs.strip().split())
        else:
            outputs = inputs
        outputs = outputs.replace('``', '"').replace('\'\'', '"')

        if not self.keep_accents:
            outputs = unicodedata.normalize('NFKD', outputs)
            outputs = ''.join([c for c in outputs if not unicodedata.combining(c)])
        if self.do_lower_case:
            outputs = outputs.lower()

        return outputs
    def _tokenize(self, text: str):
        """Tokenize a string."""
        text = self.preprocess_text(text)
        pieces = self.sp_model.encode(text, out_type=str)
        new_pieces = []
        for piece in pieces:
            if len(piece) > 1 and piece[-1] == str(',') and piece[-2].isdigit():
                cur_pieces = self.sp_model.EncodeAsPieces(piece[:-1].replace(SPIECE_UNDERLINE, ''))
                if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
                    if len(cur_pieces[0]) == 1:
                        cur_pieces = cur_pieces[1:]
                    else:
                        cur_pieces[0] = cur_pieces[0][1:]
                cur_pieces.append(piece[-1])
                new_pieces.extend(cur_pieces)
            else:
                new_pieces.append(piece)

        return new_pieces
    def _convert_token_to_id(self, token):
        """Converts a token (str) to an id using the vocab."""
        return self.sp_model.PieceToId(token)

    def _convert_id_to_token(self, index):
        """Converts an index (integer) to a token (str) using the vocab."""
        return self.sp_model.IdToPiece(index)

    def convert_tokens_to_string(self, tokens):
        """Converts a sequence of tokens (strings) to a single string."""
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return token_ids_0 + sep + cls
        return token_ids_0 + sep + token_ids_1 + sep + cls
    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )

        if token_ids_1 is not None:
            return ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1, 1]
        return ([0] * len(token_ids_0)) + [1, 1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls_segment_id = [2]

        if token_ids_1 is None:
            return len(token_ids_0 + sep) * [0] + cls_segment_id
        return len(token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1] + cls_segment_id
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)

        return (out_vocab_file,)
    def _decode(self, *args, **kwargs):
        text = super()._decode(*args, **kwargs)
        text = text.replace(' ', '').replace('\u2582', ' ').replace('\u2583', '\n')
        return text
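
# Editor's illustrative sketch (not part of the original module): the character
# substitution used around SentencePiece. Spaces and newlines are mapped to the
# placeholders \u2582/\u2583 before tokenization and restored in `_decode`.
def _example_cpm_translation():
    translator = str.maketrans(' \n', '\u2582\u2583')
    encoded = '你好 世界\n'.translate(translator)
    assert encoded == '你好\u2582世界\u2583'
    decoded = encoded.replace('\u2582', ' ').replace('\u2583', '\n')
    assert decoded == '你好 世界\n'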
| 460 | 0 |
'''simple docstring'''
def get_demo_graph(index):
return [
{
0: [1, 2],
1: [0, 2],
2: [0, 1, 3, 5],
3: [2, 4],
4: [3],
5: [2, 6, 8],
6: [5, 7],
7: [6, 8],
8: [5, 7],
},
{
0: [6],
1: [9],
2: [4, 5],
3: [4],
4: [2, 3],
5: [2],
6: [0, 7],
7: [6],
8: [],
9: [1],
},
{
0: [4],
1: [6],
2: [],
3: [5, 6, 7],
4: [0, 6],
5: [3, 8, 9],
6: [1, 3, 4, 7],
7: [3, 6, 8, 9],
8: [5, 7],
9: [5, 7],
},
{
0: [1, 3],
1: [0, 2, 4],
2: [1, 3, 4],
3: [0, 2, 4],
4: [1, 2, 3],
},
][index]
def compute_bridges(graph):
    """Return the bridges of an undirected graph (Tarjan's low-link algorithm)."""
    id_ = 0
    n = len(graph)  # No of vertices in graph
    low = [0] * n
    visited = [False] * n

    def dfs(at, parent, bridges, id_):
        visited[at] = True
        low[at] = id_
        id_ += 1
        for to in graph[at]:
            if to == parent:
                pass
            elif not visited[to]:
                dfs(to, at, bridges, id_)
                low[at] = min(low[at], low[to])
                if id_ <= low[to]:
                    bridges.append((at, to) if at < to else (to, at))
            else:
                # This edge is a back edge and cannot be a bridge
                low[at] = min(low[at], low[to])

    bridges: list[tuple[int, int]] = []
    for i in range(n):
        if not visited[i]:
            dfs(i, -1, bridges, id_)
    return bridges
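
# Editor's illustrative check (not part of the original file): graph 0 above is
# a triangle hanging off a chain and a 4-cycle; its bridges are exactly the
# chain edges and the edge into the cycle.
def _example_bridges():
    assert sorted(compute_bridges(get_demo_graph(0))) == [(2, 3), (2, 5), (3, 4)]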
if __name__ == "__main__":
import doctest
doctest.testmod()
| 711 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]
if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 428 | 0 |
import datasets
_CITATION = '''\
@InProceedings{conneau2018xnli,
author = "Conneau, Alexis
and Rinott, Ruty
and Lample, Guillaume
and Williams, Adina
and Bowman, Samuel R.
and Schwenk, Holger
and Stoyanov, Veselin",
title = "XNLI: Evaluating Cross-lingual Sentence Representations",
booktitle = "Proceedings of the 2018 Conference on Empirical Methods
in Natural Language Processing",
year = "2018",
publisher = "Association for Computational Linguistics",
location = "Brussels, Belgium",
}
'''
_DESCRIPTION = '''\
XNLI is a subset of a few thousand examples from MNLI which has been translated
into a 14 different languages (some low-ish resource). As with MNLI, the goal is
to predict textual entailment (does sentence A imply/contradict/neither sentence
B) and is a classification task (given two sentences, predict one of three
labels).
'''
_KWARGS_DESCRIPTION = '''
Computes XNLI score which is just simple accuracy.
Args:
predictions: Predicted labels.
references: Ground truth labels.
Returns:
\'accuracy\': accuracy
Examples:
>>> predictions = [0, 1]
>>> references = [0, 1]
>>> xnli_metric = datasets.load_metric("xnli")
>>> results = xnli_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'accuracy\': 1.0}
'''
def simple_accuracy(preds, labels):
    return (preds == labels).mean()
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION)
class Xnli(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    'predictions': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
                    'references': datasets.Value('int64' if self.config_name != 'sts-b' else 'float32'),
                }
            ),
            codebase_urls=[],
            reference_urls=[],
            format='numpy',
        )

    def _compute(self, predictions, references):
        return {"accuracy": simple_accuracy(predictions, references)}
| 20 |
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class CanineTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CanineTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def canine_tokenizer(self):
        return CanineTokenizer.from_pretrained('''google/canine-s''')

    def get_tokenizer(self, **kwargs) -> CanineTokenizer:
        tokenizer = self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
        tokenizer._unicode_vocab_size = 1024
        return tokenizer
@require_torch
    def test_prepare_batch_integration(self):
        tokenizer = self.canine_tokenizer
        src_text = ['''Life is like a box of chocolates.''', '''You never know what you\'re gonna get.''']
        # fmt: off
        expected_src_tokens = [57344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57345, 0, 0, 0, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''')
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 39), batch.input_ids.shape)
        self.assertEqual((2, 39), batch.attention_mask.shape)
@require_torch
    def test_encoding_keys(self):
        tokenizer = self.canine_tokenizer
        src_text = ['''Once there was a man.''', '''He wrote a test in HuggingFace Transformers.''']
        batch = tokenizer(src_text, padding=True, return_tensors='''pt''')
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('''input_ids''', batch)
        self.assertIn('''attention_mask''', batch)
        self.assertIn('''token_type_ids''', batch)
@require_torch
    def test_max_length_integration(self):
        tokenizer = self.canine_tokenizer
        tgt_text = [
            '''What\'s the weater?''',
            '''It\'s about 25 degrees.''',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='''max_length''', truncation=True, return_tensors='''pt'''
        )
        self.assertEqual(32, targets['''input_ids'''].shape[1])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ''' He is very happy, UNwant\u00E9d,running'''
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F'''{tokenizer.__class__.__name__}'''):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ''' He is very happy, UNwant\u00E9d,running'''

                additional_special_tokens = tokenizer.additional_special_tokens

                # We can add a new special token for Canine as follows:
                new_additional_special_token = chr(0xE007)
                additional_special_tokens.append(new_additional_special_token)
                tokenizer.add_special_tokens({'''additional_special_tokens''': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn(new_additional_special_token, after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
def SCREAMING_SNAKE_CASE__ ( self ):
a :Tuple = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a , a :Tuple = self.get_clean_sequence(_lowerCamelCase )
# a special token for Canine can be defined as follows:
a :Tuple = 0Xe_0_0_5
a :Optional[int] = chr(_lowerCamelCase )
tokenizer.add_special_tokens({'''cls_token''': special_token} )
a :Optional[int] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
a :List[Any] = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=_lowerCamelCase )
a :List[str] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :Tuple = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :Optional[Any] = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
self.assertEqual(_lowerCamelCase , input_encoded + special_token_id )
a :Any = tokenizer.decode(_lowerCamelCase , skip_special_tokens=_lowerCamelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :int = chr(0Xe_0_0_5 )
a :str = chr(0Xe_0_0_6 )
# `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=_lowerCamelCase )
# `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
# which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
tokenizer.add_special_tokens({'''additional_special_tokens''': [SPECIAL_TOKEN_2]} )
a :Optional[int] = tokenizer.tokenize(_lowerCamelCase )
a :Optional[Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(len(_lowerCamelCase ) , 1 )
self.assertEqual(token_a[0] , _lowerCamelCase )
self.assertEqual(token_a[0] , _lowerCamelCase )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
a :Optional[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
# a special token for Canine can be defined as follows:
a :Optional[int] = 0Xe_0_0_6
a :List[str] = chr(_lowerCamelCase )
a :Optional[int] = AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )
tokenizer.add_special_tokens({'''additional_special_tokens''': [new_token]} )
with tempfile.TemporaryDirectory() as tmp_dir_name:
tokenizer.save_pretrained(_lowerCamelCase )
tokenizer.from_pretrained(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''special_tokens_map.json''' ) , encoding='''utf-8''' ) as json_file:
a :Optional[Any] = json.load(_lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''tokenizer_config.json''' ) , encoding='''utf-8''' ) as json_file:
a :Tuple = json.load(_lowerCamelCase )
# a special token for Canine can be defined as follows:
a :int = 0Xe_0_0_6
a :Optional[Any] = chr(_lowerCamelCase )
a :Union[str, Any] = [new_token_a]
a :Optional[int] = [new_token_a]
with open(os.path.join(_lowerCamelCase , '''special_tokens_map.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
with open(os.path.join(_lowerCamelCase , '''tokenizer_config.json''' ) , '''w''' , encoding='''utf-8''' ) as outfile:
json.dump(_lowerCamelCase , _lowerCamelCase )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
a :str = tokenizer_class.from_pretrained(_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
a :Optional[int] = 0Xe_0_0_7
a :Any = chr(_lowerCamelCase )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
a :Tuple = [AddedToken(_lowerCamelCase , lstrip=_lowerCamelCase )]
a :Optional[Any] = tokenizer_class.from_pretrained(
_lowerCamelCase , additional_special_tokens=_lowerCamelCase , extra_ids=0 )
self.assertIn(_lowerCamelCase , tokenizer.additional_special_tokens )
# self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
[new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )
@require_tokenizers
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = self.get_tokenizers(do_lower_case=_lowerCamelCase )
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :List[str] = '''hello world'''
if self.space_between_special_tokens:
a :Optional[Any] = '''[CLS] hello world [SEP]'''
else:
a :Tuple = input
a :Any = tokenizer.encode(_lowerCamelCase , add_special_tokens=_lowerCamelCase )
a :List[Any] = tokenizer.decode(_lowerCamelCase , spaces_between_special_tokens=self.space_between_special_tokens )
self.assertIn(_lowerCamelCase , [output, output.lower()] )
def SCREAMING_SNAKE_CASE__ ( self ):
a :int = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'''{tokenizer.__class__.__name__}''' ):
a :Any = [
'''bos_token''',
'''eos_token''',
'''unk_token''',
'''sep_token''',
'''pad_token''',
'''cls_token''',
'''mask_token''',
]
a :str = '''a'''
a :List[Any] = ord(_lowerCamelCase )
for attr in attributes_list:
setattr(_lowerCamelCase , attr + '''_id''' , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + '''_id''' ) , _lowerCamelCase )
setattr(_lowerCamelCase , attr + '''_id''' , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , _lowerCamelCase ) , _lowerCamelCase )
self.assertEqual(getattr(_lowerCamelCase , attr + '''_id''' ) , _lowerCamelCase )
setattr(_lowerCamelCase , '''additional_special_tokens_ids''' , [] )
self.assertListEqual(getattr(_lowerCamelCase , '''additional_special_tokens''' ) , [] )
self.assertListEqual(getattr(_lowerCamelCase , '''additional_special_tokens_ids''' ) , [] )
a :List[Any] = 0Xe_0_0_6
a :str = chr(_lowerCamelCase )
setattr(_lowerCamelCase , '''additional_special_tokens_ids''' , [additional_special_token_id] )
self.assertListEqual(getattr(_lowerCamelCase , '''additional_special_tokens''' ) , [additional_special_token] )
self.assertListEqual(getattr(_lowerCamelCase , '''additional_special_tokens_ids''' ) , [additional_special_token_id] )
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
def SCREAMING_SNAKE_CASE__ ( self ):
pass
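    # The no-op overrides above intentionally skip the vocabulary-based common
    # tests: CANINE tokenizes Unicode code points directly and has no vocab
    # file to save, load or inspect (explanatory note, not from the source file).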
| 445 | 0 |
'''simple docstring'''
from __future__ import annotations
A__ : Optional[Any] = []
def a_ ( _UpperCAmelCase : list[list[int]] ,_UpperCAmelCase : int ,_UpperCAmelCase : int ) -> bool:
for i in range(len(_UpperCAmelCase ) ):
if board[row][i] == 1:
return False
for i in range(len(_UpperCAmelCase ) ):
if board[i][column] == 1:
return False
for i, j in zip(range(_UpperCAmelCase ,-1 ,-1 ) ,range(_UpperCAmelCase ,-1 ,-1 ) ):
if board[i][j] == 1:
return False
for i, j in zip(range(_UpperCAmelCase ,-1 ,-1 ) ,range(_UpperCAmelCase ,len(_UpperCAmelCase ) ) ):
if board[i][j] == 1:
return False
return True
def a_ ( _UpperCAmelCase : list[list[int]] ,_UpperCAmelCase : int ) -> bool:
if row >= len(_UpperCAmelCase ):
solution.append(_UpperCAmelCase )
printboard(_UpperCAmelCase )
print()
return True
for i in range(len(_UpperCAmelCase ) ):
if is_safe(_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ):
__snake_case : Tuple = 1
solve(_UpperCAmelCase ,row + 1 )
__snake_case : Any = 0
return False
def a_ ( _UpperCAmelCase : list[list[int]] ) -> None:
for i in range(len(_UpperCAmelCase ) ):
for j in range(len(_UpperCAmelCase ) ):
if board[i][j] == 1:
print('Q' ,end=' ' )
else:
print('.' ,end=' ' )
print()
# n=int(input("The no. of queens"))
A__ : List[Any] = 8
A__ : Dict = [[0 for i in range(n)] for j in range(n)]
solve(board, 0)
print('''The total no. of solutions are :''', len(solution))
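# Sanity check (a sketch, assuming the solver above enumerates every valid
# placement): the classic 8x8 board is known to have exactly 92 solutions,
# so after solve(board, 0) returns, the collected list should satisfy
# assert len(solution) == 92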
| 124 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ..models.whisper import WhisperForConditionalGeneration, WhisperProcessor
from .base import PipelineTool
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
A__ = '''openai/whisper-base'''
A__ = (
'''This is a tool that transcribes an audio into text. It takes an input named `audio` and returns the '''
'''transcribed text.'''
)
A__ = '''transcriber'''
A__ = WhisperProcessor
A__ = WhisperForConditionalGeneration
A__ = ['''audio''']
A__ = ['''text''']
def A_ ( self : Any , __a : int ) -> Dict:
'''simple docstring'''
return self.pre_processor(__a , return_tensors='pt' ).input_features
def A_ ( self : Optional[int] , __a : List[Any] ) -> Optional[Any]:
'''simple docstring'''
return self.model.generate(inputs=__a )
def A_ ( self : str , __a : str ) -> List[str]:
'''simple docstring'''
return self.pre_processor.batch_decode(__a , skip_special_tokens=__a )[0]
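# Minimal usage sketch (assuming the inherited PipelineTool.__call__ chains the
# three methods above, encode -> forward -> decode):
# tool = snake_case__() # the transcriber tool class defined above
# text = tool(audio) # `audio` is a raw waveform array; returns the transcript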
| 124 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCAmelCase = logging.get_logger(__name__)
class A ( lowercase_ ):
def __init__(self , *lowerCAmelCase , **lowerCAmelCase ):
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , UpperCamelCase__ , )
super().__init__(*UpperCamelCase__ , **UpperCamelCase__ )
| 230 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
from ..utils import cached_file
# docstyle-ignore
__lowerCamelCase : List[Any] = '\nHuman: <<task>>\n\nAssistant: '
__lowerCamelCase : Dict = 'huggingface-tools/default-prompts'
__lowerCamelCase : Optional[Any] = {'chat': 'chat_prompt_template.txt', 'run': 'run_prompt_template.txt'}
def _a (__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE="run" ):
"""simple docstring"""
if prompt_or_repo_id is None:
_UpperCamelCase =DEFAULT_PROMPTS_REPO
# prompt is considered a repo ID when it does not contain any kind of space
if re.search('''\\s''' , __SCREAMING_SNAKE_CASE ) is not None:
return prompt_or_repo_id
_UpperCamelCase =cached_file(
__SCREAMING_SNAKE_CASE , PROMPT_FILES[mode] , repo_type='''dataset''' , user_agent={'''agent''': agent_name} )
with open(__SCREAMING_SNAKE_CASE , '''r''' , encoding='''utf-8''' ) as f:
return f.read()
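# Illustrative behaviour of the branch above: a string containing whitespace,
# e.g. "Answer the following question", is treated as the prompt itself and
# returned unchanged, while a space-free string such as
# "huggingface-tools/default-prompts" is resolved as a Hub dataset id and the
# corresponding prompt template file is downloaded and read.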
| 404 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase__ : int = logging.get_logger(__name__)
UpperCamelCase__ : Optional[int] = {
"google/vivit-b-16x2-kinetics400": (
"https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json"
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class _a (_lowerCamelCase):
"""simple docstring"""
SCREAMING_SNAKE_CASE = 'vivit'
def __init__( self , A__=2_24 , A__=32 , A__=[2, 16, 16] , A__=3 , A__=7_68 , A__=12 , A__=12 , A__=30_72 , A__="gelu_fast" , A__=0.0 , A__=0.0 , A__=0.02 , A__=1E-06 , A__=True , **A__ , ) -> Any:
_SCREAMING_SNAKE_CASE = hidden_size
_SCREAMING_SNAKE_CASE = num_hidden_layers
_SCREAMING_SNAKE_CASE = num_attention_heads
_SCREAMING_SNAKE_CASE = intermediate_size
_SCREAMING_SNAKE_CASE = hidden_act
_SCREAMING_SNAKE_CASE = hidden_dropout_prob
_SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
_SCREAMING_SNAKE_CASE = initializer_range
_SCREAMING_SNAKE_CASE = layer_norm_eps
_SCREAMING_SNAKE_CASE = image_size
_SCREAMING_SNAKE_CASE = num_frames
_SCREAMING_SNAKE_CASE = tubelet_size
_SCREAMING_SNAKE_CASE = num_channels
_SCREAMING_SNAKE_CASE = qkv_bias
super().__init__(**A__ )
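# Hand-checked token count with the defaults above (a note, not from the file):
# a tubelet_size of [2, 16, 16] over 32 frames of 224x224 video yields
# (32 / 2) * (224 / 16) ** 2 = 16 * 196 = 3136 video tokens per clip.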
| 0 |
'''simple docstring'''
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""multiplicative_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""multiplicative_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 1
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total *= numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
def lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ) -> int:
"""simple docstring"""
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError("""additive_persistence() only accepts integral values""" )
if num < 0:
raise ValueError("""additive_persistence() does not accept negative values""" )
_SCREAMING_SNAKE_CASE = 0
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
while len(SCREAMING_SNAKE_CASE_ ) != 1:
_SCREAMING_SNAKE_CASE = [int(SCREAMING_SNAKE_CASE_ ) for i in num_string]
_SCREAMING_SNAKE_CASE = 0
for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) ):
total += numbers[i]
_SCREAMING_SNAKE_CASE = str(SCREAMING_SNAKE_CASE_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
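    # Hand-checked examples: 217 -> 2*1*7 = 14 -> 1*4 = 4, a multiplicative
    # persistence of 2; 199 -> 1+9+9 = 19 -> 1+9 = 10 -> 1+0 = 1, an additive
    # persistence of 3.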
| 0 | 1 |
import glob
import os
import random
from string import ascii_lowercase, digits
import cva
import numpy as np
# Parameters
_A : int = (7_20, 12_80) # Height, Width
_A : Optional[Any] = (0.4, 0.6) # if a box's height or width falls below this scale, drop it
_A : Optional[Any] = 1 / 1_00
_A : Any = """"""
_A : Dict = """"""
_A : str = """"""
_A : Optional[int] = 2_50
def __snake_case ( ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE__ = get_dataset(_a , _a )
for index in range(_a ):
SCREAMING_SNAKE_CASE__ = random.sample(range(len(_a ) ) , 4 )
SCREAMING_SNAKE_CASE__ = update_image_and_anno(
_a , _a , _a , _a , _a , filter_scale=_a , )
# Get random string code: '7b7ad245cdff75241935e4dd860f3bad'
SCREAMING_SNAKE_CASE__ = random_chars(3_2 )
SCREAMING_SNAKE_CASE__ = path.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
SCREAMING_SNAKE_CASE__ = f'''{OUTPUT_DIR}/{file_name}_MOSAIC_{letter_code}'''
cva.imwrite(f'''{file_root}.jpg''' , _a , [cva.IMWRITE_JPEG_QUALITY, 8_5] )
print(f'''Succeeded {index+1}/{NUMBER_IMAGES} with {file_name}''' )
SCREAMING_SNAKE_CASE__ = []
for anno in new_annos:
SCREAMING_SNAKE_CASE__ = anno[3] - anno[1]
SCREAMING_SNAKE_CASE__ = anno[4] - anno[2]
SCREAMING_SNAKE_CASE__ = anno[1] + width / 2
SCREAMING_SNAKE_CASE__ = anno[2] + height / 2
SCREAMING_SNAKE_CASE__ = f'''{anno[0]} {x_center} {y_center} {width} {height}'''
annos_list.append(_a )
with open(f'''{file_root}.txt''' , '''w''' ) as outfile:
outfile.write('''\n'''.join(line for line in annos_list ) )
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ ) -> Any:
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for label_file in glob.glob(os.path.join(_a , '''*.txt''' ) ):
SCREAMING_SNAKE_CASE__ = label_file.split(os.sep )[-1].rsplit('''.''' , 1 )[0]
with open(_a ) as in_file:
SCREAMING_SNAKE_CASE__ = in_file.readlines()
SCREAMING_SNAKE_CASE__ = os.path.join(_a , f'''{label_name}.jpg''' )
SCREAMING_SNAKE_CASE__ = []
for obj_list in obj_lists:
SCREAMING_SNAKE_CASE__ = obj_list.rstrip('''\n''' ).split(''' ''' )
SCREAMING_SNAKE_CASE__ = float(obj[1] ) - float(obj[3] ) / 2
SCREAMING_SNAKE_CASE__ = float(obj[2] ) - float(obj[4] ) / 2
SCREAMING_SNAKE_CASE__ = float(obj[1] ) + float(obj[3] ) / 2
SCREAMING_SNAKE_CASE__ = float(obj[2] ) + float(obj[4] ) / 2
boxes.append([int(obj[0] ), xmin, ymin, xmax, ymax] )
if not boxes:
continue
img_paths.append(_a )
labels.append(_a )
return img_paths, labels
def __snake_case ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ = 0.0 , ) -> Tuple:
SCREAMING_SNAKE_CASE__ = np.zeros([output_size[0], output_size[1], 3] , dtype=np.uinta )
SCREAMING_SNAKE_CASE__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE__ = scale_range[0] + random.random() * (scale_range[1] - scale_range[0])
SCREAMING_SNAKE_CASE__ = int(scale_x * output_size[1] )
SCREAMING_SNAKE_CASE__ = int(scale_y * output_size[0] )
SCREAMING_SNAKE_CASE__ = []
SCREAMING_SNAKE_CASE__ = []
for i, index in enumerate(_a ):
SCREAMING_SNAKE_CASE__ = all_img_list[index]
path_list.append(_a )
SCREAMING_SNAKE_CASE__ = all_annos[index]
SCREAMING_SNAKE_CASE__ = cva.imread(_a )
if i == 0: # top-left
SCREAMING_SNAKE_CASE__ = cva.resize(_a , (divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE__ = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ = bbox[1] * scale_x
SCREAMING_SNAKE_CASE__ = bbox[2] * scale_y
SCREAMING_SNAKE_CASE__ = bbox[3] * scale_x
SCREAMING_SNAKE_CASE__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 1: # top-right
SCREAMING_SNAKE_CASE__ = cva.resize(_a , (output_size[1] - divid_point_x, divid_point_y) )
SCREAMING_SNAKE_CASE__ = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE__ = bbox[2] * scale_y
SCREAMING_SNAKE_CASE__ = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE__ = bbox[4] * scale_y
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
elif i == 2: # bottom-left
SCREAMING_SNAKE_CASE__ = cva.resize(_a , (divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE__ = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ = bbox[1] * scale_x
SCREAMING_SNAKE_CASE__ = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE__ = bbox[3] * scale_x
SCREAMING_SNAKE_CASE__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
else: # bottom-right
SCREAMING_SNAKE_CASE__ = cva.resize(
_a , (output_size[1] - divid_point_x, output_size[0] - divid_point_y) )
SCREAMING_SNAKE_CASE__ = img
for bbox in img_annos:
SCREAMING_SNAKE_CASE__ = scale_x + bbox[1] * (1 - scale_x)
SCREAMING_SNAKE_CASE__ = scale_y + bbox[2] * (1 - scale_y)
SCREAMING_SNAKE_CASE__ = scale_x + bbox[3] * (1 - scale_x)
SCREAMING_SNAKE_CASE__ = scale_y + bbox[4] * (1 - scale_y)
new_anno.append([bbox[0], xmin, ymin, xmax, ymax] )
    # Remove bounding boxes smaller than the filter scale
if filter_scale > 0:
SCREAMING_SNAKE_CASE__ = [
anno
for anno in new_anno
if filter_scale < (anno[3] - anno[1]) and filter_scale < (anno[4] - anno[2])
]
return output_img, new_anno, path_list[0]
def __snake_case ( lowerCAmelCase_ ) -> Tuple:
    assert number_char > 1, "The number of characters should be greater than 1"
SCREAMING_SNAKE_CASE__ = ascii_lowercase + digits
return "".join(random.choice(_a ) for _ in range(_a ) )
if __name__ == "__main__":
main()
print("""DONE ✅""")
| 100 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {
"""google/pegasus-large""": """https://huggingface.co/google/pegasus-large/resolve/main/config.json""",
# See all PEGASUS models at https://huggingface.co/models?filter=pegasus
}
class _lowercase ( _A ):
_a : Dict = 'pegasus'
_a : Tuple = ['past_key_values']
_a : str = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , a=5_0_2_6_5 , a=1_0_2_4 , a=1_2 , a=4_0_9_6 , a=1_6 , a=1_2 , a=4_0_9_6 , a=1_6 , a=0.0 , a=0.0 , a=True , a=True , a="gelu" , a=1_0_2_4 , a=0.1 , a=0.0 , a=0.0 , a=0.02 , a=0 , a=False , a=0 , a=1 , a=1 , **a , ):
snake_case__ : List[Any] =vocab_size
snake_case__ : Optional[int] =max_position_embeddings
snake_case__ : str =d_model
snake_case__ : Tuple =encoder_ffn_dim
snake_case__ : str =encoder_layers
snake_case__ : Union[str, Any] =encoder_attention_heads
snake_case__ : Tuple =decoder_ffn_dim
snake_case__ : Optional[Any] =decoder_layers
snake_case__ : Union[str, Any] =decoder_attention_heads
snake_case__ : Optional[Any] =dropout
snake_case__ : str =attention_dropout
snake_case__ : Optional[int] =activation_dropout
snake_case__ : Union[str, Any] =activation_function
snake_case__ : List[Any] =init_std
snake_case__ : Any =encoder_layerdrop
snake_case__ : int =decoder_layerdrop
snake_case__ : Any =use_cache
snake_case__ : Tuple =encoder_layers
snake_case__ : Optional[Any] =scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=a , eos_token_id=a , is_encoder_decoder=a , decoder_start_token_id=a , forced_eos_token_id=a , **a , )
@property
def lowercase__ ( self ):
return self.encoder_attention_heads
@property
def lowercase__ ( self ):
return self.d_model
| 385 | 0 |
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
_lowercase = [
"""python""",
"""tqdm""",
"""regex""",
"""requests""",
"""packaging""",
"""filelock""",
"""numpy""",
"""tokenizers""",
"""huggingface-hub""",
"""safetensors""",
"""accelerate""",
"""pyyaml""",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F"can't find {pkg} in {deps.keys()}, check dependency_versions_table.py")
def SCREAMING_SNAKE_CASE_ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : Optional[Any]=None ) -> Tuple:
require_version(deps[pkg] , UpperCAmelCase_ )
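# Usage sketch for the wrapper above: require_version takes a pip-style
# requirement plus an optional hint shown on failure, e.g.
# require_version("tokenizers>=0.11.1", "pip install -U tokenizers")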
| 431 |
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def SCREAMING_SNAKE_CASE_ ( ) -> List[Any]:
SCREAMING_SNAKE_CASE_ : Optional[Any] ={
'''repo_name''': ['''test_repo1''', '''test_repo2''', '''test_repo3'''],
'''path''': ['''test_1.py''', '''test_2.py''', '''unit_test.py'''],
'''content''': ['''a ''' * 2_0, '''a ''' * 3_0, '''b ''' * 7],
}
SCREAMING_SNAKE_CASE_ : Any =Dataset.from_dict(UpperCAmelCase_ )
return dataset
class lowercase_ ( A ):
def _snake_case ( self ) -> Dict:
SCREAMING_SNAKE_CASE_ : int =get_dataset()
SCREAMING_SNAKE_CASE_ : Any =make_duplicate_clusters(__A , 0.85 )
self.assertEqual(len(duplicate_clusters[0] ) , 2 )
def _snake_case ( self ) -> Union[str, Any]:
SCREAMING_SNAKE_CASE_ : Optional[int] =get_dataset()
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ : Dict =deduplicate_dataset(__A )
self.assertEqual(len(__A ) , 2 )
print(__A )
self.assertEqual(duplicate_clusters[0][0]['''copies'''] , 2 )
self.assertEqual(duplicate_clusters[0][0]['''is_extreme'''] , __A )
| 431 | 1 |
from __future__ import annotations
import os
from collections.abc import Mapping
SCREAMING_SNAKE_CASE__ : Dict = tuple[int, int]
class snake_case :
def __init__( self : Tuple , a_ : set[int] , a_ : Mapping[EdgeT, int] )-> None:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : set[int] = vertices
SCREAMING_SNAKE_CASE__ : dict[EdgeT, int] = {
(min(a_ ), max(a_ )): weight for edge, weight in edges.items()
}
def __lowercase( self : Optional[int] , a_ : EdgeT , a_ : int )-> None:
"""simple docstring"""
self.vertices.add(edge[0] )
self.vertices.add(edge[1] )
SCREAMING_SNAKE_CASE__ : Any = weight
def __lowercase( self : Optional[Any] )-> Graph:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Graph = Graph({min(self.vertices )} , {} )
SCREAMING_SNAKE_CASE__ : EdgeT
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : EdgeT
SCREAMING_SNAKE_CASE__ : int
while len(subgraph.vertices ) < len(self.vertices ):
SCREAMING_SNAKE_CASE__ : Optional[Any] = max(self.edges.values() ) + 1
for edge, weight in self.edges.items():
if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
if weight < min_weight:
SCREAMING_SNAKE_CASE__ : Any = edge
SCREAMING_SNAKE_CASE__ : Tuple = weight
subgraph.add_edge(a_ , a_ )
return subgraph
def _a ( lowercase__ : str = "p107_network.txt" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ : str = os.path.abspath(os.path.dirname(lowercase__ ) )
SCREAMING_SNAKE_CASE__ : str = os.path.join(lowercase__ , lowercase__ )
SCREAMING_SNAKE_CASE__ : dict[EdgeT, int] = {}
SCREAMING_SNAKE_CASE__ : list[str]
SCREAMING_SNAKE_CASE__ : int
SCREAMING_SNAKE_CASE__ : int
with open(lowercase__ ) as f:
SCREAMING_SNAKE_CASE__ : Union[str, Any] = f.read().strip().split('\n' )
SCREAMING_SNAKE_CASE__ : Any = [line.split(',' ) for line in data]
for edgea in range(1 , len(lowercase__ ) ):
for edgea in range(lowercase__ ):
if adjaceny_matrix[edgea][edgea] != "-":
SCREAMING_SNAKE_CASE__ : Union[str, Any] = int(adjaceny_matrix[edgea][edgea] )
SCREAMING_SNAKE_CASE__ : Graph = Graph(set(range(len(lowercase__ ) ) ) , lowercase__ )
SCREAMING_SNAKE_CASE__ : Graph = graph.prims_algorithm()
SCREAMING_SNAKE_CASE__ : int = sum(graph.edges.values() )
SCREAMING_SNAKE_CASE__ : int = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"""{solution() = }""")
| 85 |
import cva
import numpy as np
class snake_case :
'''simple docstring'''
def __init__( self : Any , lowerCAmelCase_ : float , lowerCAmelCase_ : int ) -> List[Any]:
"""simple docstring"""
if k in (0.04, 0.06):
SCREAMING_SNAKE_CASE_ = k
SCREAMING_SNAKE_CASE_ = window_size
else:
raise ValueError('''invalid k value''' )
def __str__( self : Tuple ) -> str:
"""simple docstring"""
return str(self.k )
def _lowercase ( self : List[str] , lowerCAmelCase_ : str ) -> tuple[cva.Mat, list[list[int]]]:
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = cva.imread(lowerCAmelCase_ , 0 )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = img.shape
SCREAMING_SNAKE_CASE_ = []
SCREAMING_SNAKE_CASE_ = img.copy()
SCREAMING_SNAKE_CASE_ = cva.cvtColor(lowerCAmelCase_ , cva.COLOR_GRAY2RGB )
SCREAMING_SNAKE_CASE_ ,SCREAMING_SNAKE_CASE_ = np.gradient(lowerCAmelCase_ )
SCREAMING_SNAKE_CASE_ = dx**2
SCREAMING_SNAKE_CASE_ = dy**2
SCREAMING_SNAKE_CASE_ = dx * dy
        SCREAMING_SNAKE_CASE_ = self.k # use the sensitivity constant validated in __init__ (was re-hardcoded to 0.04)
SCREAMING_SNAKE_CASE_ = self.window_size // 2
for y in range(lowerCAmelCase_ , h - offset ):
for x in range(lowerCAmelCase_ , w - offset ):
SCREAMING_SNAKE_CASE_ = ixx[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = iyy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = ixy[
y - offset : y + offset + 1, x - offset : x + offset + 1
].sum()
SCREAMING_SNAKE_CASE_ = (wxx * wyy) - (wxy**2)
SCREAMING_SNAKE_CASE_ = wxx + wyy
SCREAMING_SNAKE_CASE_ = det - k * (trace**2)
                # threshold on the corner response r; 0.5 is arbitrary and can be tuned
if r > 0.5:
corner_list.append([x, y, r] )
color_img.itemset((y, x, 0) , 0 )
color_img.itemset((y, x, 1) , 0 )
color_img.itemset((y, x, 2) , 255 )
return color_img, corner_list
if __name__ == "__main__":
A_ = HarrisCorner(0.04, 3)
A_ ,A_ = edge_detect.detect("path_to_image")
cva.imwrite("detect.png", color_img)
| 393 | 0 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class __a :
'''simple docstring'''
def __init__( self , UpperCamelCase__ , UpperCamelCase__=13 , UpperCamelCase__=10 , UpperCamelCase__=3 , UpperCamelCase__=2 , UpperCamelCase__=2 , UpperCamelCase__=True , UpperCamelCase__=True , UpperCamelCase__=32 , UpperCamelCase__=5 , UpperCamelCase__=4 , UpperCamelCase__=37 , UpperCamelCase__="gelu" , UpperCamelCase__=0.1 , UpperCamelCase__=0.1 , UpperCamelCase__=10 , UpperCamelCase__=0.02 , UpperCamelCase__="divided_space_time" , UpperCamelCase__=None , ):
SCREAMING_SNAKE_CASE_ : List[str] = parent
SCREAMING_SNAKE_CASE_ : str = batch_size
SCREAMING_SNAKE_CASE_ : Optional[Any] = image_size
SCREAMING_SNAKE_CASE_ : Dict = num_channels
SCREAMING_SNAKE_CASE_ : List[str] = patch_size
SCREAMING_SNAKE_CASE_ : int = num_frames
SCREAMING_SNAKE_CASE_ : List[str] = is_training
SCREAMING_SNAKE_CASE_ : Union[str, Any] = use_labels
SCREAMING_SNAKE_CASE_ : int = hidden_size
SCREAMING_SNAKE_CASE_ : List[Any] = num_hidden_layers
SCREAMING_SNAKE_CASE_ : str = num_attention_heads
SCREAMING_SNAKE_CASE_ : int = intermediate_size
SCREAMING_SNAKE_CASE_ : Tuple = hidden_act
SCREAMING_SNAKE_CASE_ : List[str] = hidden_dropout_prob
SCREAMING_SNAKE_CASE_ : int = attention_probs_dropout_prob
SCREAMING_SNAKE_CASE_ : List[str] = attention_type
SCREAMING_SNAKE_CASE_ : int = initializer_range
SCREAMING_SNAKE_CASE_ : List[str] = scope
SCREAMING_SNAKE_CASE_ : Any = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
SCREAMING_SNAKE_CASE_ : List[Any] = (image_size // patch_size) ** 2
SCREAMING_SNAKE_CASE_ : str = (num_frames) * self.num_patches_per_frame + 1
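        # Hand check with the tester defaults: image_size 10, patch_size 2 and
        # num_frames 2 give (10 // 2) ** 2 = 25 patches per frame, hence a
        # sequence length of 2 * 25 + 1 = 51 tokens including the CLS token.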
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
SCREAMING_SNAKE_CASE_ : Any = None
if self.use_labels:
SCREAMING_SNAKE_CASE_ : str = ids_tensor([self.batch_size] , self.num_labels )
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
SCREAMING_SNAKE_CASE_ : List[Any] = self.num_labels
return config
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = TimesformerModel(config=__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Any = TimesformerForVideoClassification(__A )
model.to(__A )
model.eval()
SCREAMING_SNAKE_CASE_ : Dict = model(__A )
# verify the logits shape
SCREAMING_SNAKE_CASE_ : Union[str, Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __A )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Dict = self.prepare_config_and_inputs()
SCREAMING_SNAKE_CASE_ : List[str] = config_and_inputs
SCREAMING_SNAKE_CASE_ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class __a ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase__ : int = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
UpperCAmelCase__ : List[str] = (
{"""feature-extraction""": TimesformerModel, """video-classification""": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
UpperCAmelCase__ : str = False
UpperCAmelCase__ : Optional[Any] = False
UpperCAmelCase__ : Union[str, Any] = False
UpperCAmelCase__ : List[Any] = False
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[int] = TimesformerModelTester(self )
SCREAMING_SNAKE_CASE_ : Tuple = ConfigTester(
self , config_class=__A , has_text_modality=__A , hidden_size=37 )
def __snake_case ( self , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__=False ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = copy.deepcopy(__A )
if return_labels:
if model_class in get_values(__A ):
SCREAMING_SNAKE_CASE_ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def __snake_case ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='TimeSformer does not use inputs_embeds' )
def __snake_case ( self ):
pass
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
SCREAMING_SNAKE_CASE_ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : Any = model_class(__A )
SCREAMING_SNAKE_CASE_ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
SCREAMING_SNAKE_CASE_ : Union[str, Any] = [*signature.parameters.keys()]
SCREAMING_SNAKE_CASE_ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__A )
@slow
def __snake_case ( self ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
SCREAMING_SNAKE_CASE_ : List[str] = TimesformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def __snake_case ( self ):
if not self.has_attentions:
pass
else:
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : List[Any] = self.model_tester.seq_length
SCREAMING_SNAKE_CASE_ : Optional[int] = self.model_tester.num_frames
SCREAMING_SNAKE_CASE_ : int = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = False
SCREAMING_SNAKE_CASE_ : Optional[int] = True
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
SCREAMING_SNAKE_CASE_ : List[Any] = True
SCREAMING_SNAKE_CASE_ : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : int = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
SCREAMING_SNAKE_CASE_ : Dict = len(__A )
# Check attention is always last and order is fine
SCREAMING_SNAKE_CASE_ : Union[str, Any] = True
SCREAMING_SNAKE_CASE_ : List[str] = True
SCREAMING_SNAKE_CASE_ : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
SCREAMING_SNAKE_CASE_ : str = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def __snake_case ( self ):
def check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ):
SCREAMING_SNAKE_CASE_ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : str = model(**self._prepare_for_class(__A , __A ) )
SCREAMING_SNAKE_CASE_ : Dict = outputs.hidden_states
SCREAMING_SNAKE_CASE_ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
SCREAMING_SNAKE_CASE_ : Tuple = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
SCREAMING_SNAKE_CASE_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
SCREAMING_SNAKE_CASE_ : int = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
SCREAMING_SNAKE_CASE_ : str = True
check_hidden_states_output(__A , __A , __A )
def _lowerCamelCase( ):
'''simple docstring'''
SCREAMING_SNAKE_CASE_ : List[Any] = hf_hub_download(
repo_id='hf-internal-testing/spaghetti-video' , filename='eating_spaghetti.npy' , repo_type='dataset' )
SCREAMING_SNAKE_CASE_ : List[Any] = np.load(snake_case_ )
return list(snake_case_ )
@require_torch
@require_vision
class __a ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def __snake_case ( self ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def __snake_case ( self ):
SCREAMING_SNAKE_CASE_ : Tuple = TimesformerForVideoClassification.from_pretrained('facebook/timesformer-base-finetuned-k400' ).to(
__A )
SCREAMING_SNAKE_CASE_ : Tuple = self.default_image_processor
SCREAMING_SNAKE_CASE_ : str = prepare_video()
SCREAMING_SNAKE_CASE_ : str = image_processor(video[:8] , return_tensors='pt' ).to(__A )
# forward pass
with torch.no_grad():
SCREAMING_SNAKE_CASE_ : Optional[Any] = model(**__A )
# verify the logits
SCREAMING_SNAKE_CASE_ : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , __A )
SCREAMING_SNAKE_CASE_ : Tuple = torch.tensor([-0.30_16, -0.77_13, -0.42_05] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1E-4 ) ) | 703 |
from math import sqrt
def _lowerCamelCase( lowerCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' must been an int and positive"
SCREAMING_SNAKE_CASE_ : Dict = True
# 0 and 1 are none primes.
if number <= 1:
SCREAMING_SNAKE_CASE_ : Optional[int] = False
for divisor in range(2 , int(round(sqrt(lowerCAmelCase__ ) ) ) + 1 ):
# if 'number' divisible by 'divisor' then sets 'status'
# of false and break up the loop.
if number % divisor == 0:
SCREAMING_SNAKE_CASE_ : Dict = False
break
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'status' must been from type bool"
return status
def _lowerCamelCase( lowerCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
# beginList: contains all natural numbers from 2 up to N
SCREAMING_SNAKE_CASE_ : Optional[Any] = list(range(2 , n + 1 ) )
SCREAMING_SNAKE_CASE_ : Any = [] # this list will be returns.
# actual sieve of erathostenes
for i in range(len(lowerCAmelCase__ ) ):
for j in range(i + 1 , len(lowerCAmelCase__ ) ):
if (begin_list[i] != 0) and (begin_list[j] % begin_list[i] == 0):
SCREAMING_SNAKE_CASE_ : str = 0
# filters actual prime numbers.
SCREAMING_SNAKE_CASE_ : int = [x for x in begin_list if x != 0]
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _lowerCamelCase( lowerCAmelCase__ : List[str] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n > 2), "'N' must been an int and > 2"
SCREAMING_SNAKE_CASE_ : int = []
# iterates over all numbers between 2 up to N+1
# if a number is prime then appends to list 'ans'
for number in range(2 , n + 1 ):
if is_prime(lowerCAmelCase__ ):
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _lowerCamelCase( lowerCAmelCase__ : Union[str, Any] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and number >= 0, "'number' must been an int and >= 0"
SCREAMING_SNAKE_CASE_ : Optional[int] = [] # this list will be returns of the function.
# potential prime number factors.
SCREAMING_SNAKE_CASE_ : Optional[Any] = 2
SCREAMING_SNAKE_CASE_ : List[Any] = number
if number == 0 or number == 1:
ans.append(lowerCAmelCase__ )
# if 'number' not prime then builds the prime factorization of 'number'
elif not is_prime(lowerCAmelCase__ ):
while quotient != 1:
if is_prime(lowerCAmelCase__ ) and (quotient % factor == 0):
ans.append(lowerCAmelCase__ )
quotient /= factor
else:
factor += 1
else:
ans.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type list"
return ans
def _lowerCamelCase( lowerCAmelCase__ : Any ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE_ : int = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE_ : Optional[Any] = prime_factorization(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Dict = max(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number >= 0
), "'number' bust been an int and >= 0"
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
# prime factorization of 'number'
SCREAMING_SNAKE_CASE_ : Union[str, Any] = prime_factorization(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = min(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'ans' must been from type int"
return ans
def _lowerCamelCase( lowerCAmelCase__ : Optional[int] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 == 0 , lowerCAmelCase__ ), "compare must be of type bool"
return number % 2 == 0
def _lowerCamelCase( lowerCAmelCase__ : Optional[int] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ), "'number' must been an int"
    assert isinstance(number % 2 != 0 , lowerCAmelCase__ ), "compare must be of type bool"
return number % 2 != 0
def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (number > 2) and is_even(lowerCAmelCase__ )
), "'number' must been an int, even and > 2"
SCREAMING_SNAKE_CASE_ : Any = [] # this list will returned
# creates a list of prime numbers between 2 up to 'number'
SCREAMING_SNAKE_CASE_ : int = get_prime_numbers(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Optional[Any] = len(lowerCAmelCase__ )
# run variable for while-loops.
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : List[str] = None
# exit variable. for break up the loops
SCREAMING_SNAKE_CASE_ : Optional[Any] = True
while i < len_pn and loop:
SCREAMING_SNAKE_CASE_ : Optional[int] = i + 1
while j < len_pn and loop:
if prime_numbers[i] + prime_numbers[j] == number:
SCREAMING_SNAKE_CASE_ : List[str] = False
ans.append(prime_numbers[i] )
ans.append(prime_numbers[j] )
j += 1
i += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (len(lowerCAmelCase__ ) == 2)
and (ans[0] + ans[1] == number)
and is_prime(ans[0] )
and is_prime(ans[1] )
), "'ans' must contains two primes. And sum of elements must been eq 'number'"
return ans
def _lowerCamelCase( lowerCAmelCase__ : str , lowerCAmelCase__ : List[Any] ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 0)
and (numbera >= 0)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE_ : int = 0
while numbera != 0:
SCREAMING_SNAKE_CASE_ : List[str] = numbera % numbera
SCREAMING_SNAKE_CASE_ : Optional[int] = numbera
SCREAMING_SNAKE_CASE_ : List[Any] = rest
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
numbera >= 0
), "'number' must been from type int and positive"
return numbera
def _lowerCamelCase( lowerCAmelCase__ : int , lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numbera >= 1)
and (numbera >= 1)
), "'number1' and 'number2' must been positive integer."
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 # actual answer that will be return.
# for kgV (x,1)
if numbera > 1 and numbera > 1:
# builds the prime factorization of 'number1' and 'number2'
SCREAMING_SNAKE_CASE_ : Tuple = prime_factorization(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : List[Any] = prime_factorization(lowerCAmelCase__ )
elif numbera == 1 or numbera == 1:
SCREAMING_SNAKE_CASE_ : List[Any] = []
SCREAMING_SNAKE_CASE_ : Optional[Any] = []
SCREAMING_SNAKE_CASE_ : int = max(lowerCAmelCase__ , lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 0
SCREAMING_SNAKE_CASE_ : Any = 0
SCREAMING_SNAKE_CASE_ : Optional[Any] = [] # captured numbers int both 'primeFac1' and 'primeFac2'
# iterates through primeFac1
for n in prime_fac_a:
if n not in done:
if n in prime_fac_a:
SCREAMING_SNAKE_CASE_ : int = prime_fac_a.count(lowerCAmelCase__ )
SCREAMING_SNAKE_CASE_ : Tuple = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(max(lowerCAmelCase__ , lowerCAmelCase__ ) ):
ans *= n
else:
SCREAMING_SNAKE_CASE_ : List[Any] = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# iterates through primeFac2
for n in prime_fac_a:
if n not in done:
SCREAMING_SNAKE_CASE_ : int = prime_fac_a.count(lowerCAmelCase__ )
for _ in range(lowerCAmelCase__ ):
ans *= n
done.append(lowerCAmelCase__ )
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
ans >= 0
), "'ans' must been from type int and positive"
return ans
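# Hand-checked example for the two helpers above: gcd(24, 36) = 12 and the
# lcm builder returns 72, consistent with 24 * 36 // gcd(24, 36) = 864 // 12 = 72.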
def _lowerCamelCase( lowerCAmelCase__ : Any ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'number' must been a positive int"
SCREAMING_SNAKE_CASE_ : Optional[int] = 0
SCREAMING_SNAKE_CASE_ : Optional[int] = 2 # this variable holds the answer
while index < n:
index += 1
ans += 1 # counts to the next number
# if ans not prime then
# runs to the next prime number.
while not is_prime(lowerCAmelCase__ ):
ans += 1
# precondition
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and is_prime(
lowerCAmelCase__ ), "'ans' must been a prime number and from type int"
return ans
def _lowerCamelCase( lowerCAmelCase__ : List[str] , lowerCAmelCase__ : Optional[int] ):
'''simple docstring'''
assert (
is_prime(lowerCAmelCase__ ) and is_prime(lowerCAmelCase__ ) and (p_number_a < p_number_a)
), "The arguments must been prime numbers and 'pNumber1' < 'pNumber2'"
SCREAMING_SNAKE_CASE_ : str = p_number_a + 1 # jump to the next number
SCREAMING_SNAKE_CASE_ : List[Any] = [] # this list will be returns.
# if number is not prime then
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
while number < p_number_a:
ans.append(lowerCAmelCase__ )
number += 1
# fetch the next prime number.
while not is_prime(lowerCAmelCase__ ):
number += 1
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and ans[0] != p_number_a
and ans[len(lowerCAmelCase__ ) - 1] != p_number_a
), "'ans' must been a list without the arguments"
# 'ans' contains not 'pNumber1' and 'pNumber2' !
return ans
def _lowerCamelCase( lowerCAmelCase__ : Optional[Any] ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 1), "'n' must been int and >= 1"
SCREAMING_SNAKE_CASE_ : Optional[Any] = [] # will be returned.
for divisor in range(1 , n + 1 ):
if n % divisor == 0:
ans.append(lowerCAmelCase__ )
# precondition
    assert ans[0] == 1 and ans[len(lowerCAmelCase__ ) - 1] == n, "Error in function get_divisors(...)"
return ans
def _lowerCamelCase( lowerCAmelCase__ : int ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (
number > 1
), "'number' must been an int and >= 1"
SCREAMING_SNAKE_CASE_ : int = get_divisors(lowerCAmelCase__ )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (divisors[0] == 1)
and (divisors[len(lowerCAmelCase__ ) - 1] == number)
), "Error in help-function getDivisiors(...)"
# summed all divisors up to 'number' (exclusive), hence [:-1]
return sum(divisors[:-1] ) == number
def _lowerCamelCase( lowerCAmelCase__ : Dict , lowerCAmelCase__ : str ):
'''simple docstring'''
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (denominator != 0)
), "The arguments must been from type int and 'denominator' != 0"
# build the greatest common divisor of numerator and denominator.
SCREAMING_SNAKE_CASE_ : str = gcd(abs(lowerCAmelCase__ ) , abs(lowerCAmelCase__ ) )
# precondition
assert (
isinstance(lowerCAmelCase__ , lowerCAmelCase__ )
and (numerator % gcd_of_fraction == 0)
and (denominator % gcd_of_fraction == 0)
), "Error in function gcd(...,...)"
return (numerator // gcd_of_fraction, denominator // gcd_of_fraction)
def _lowerCamelCase( lowerCAmelCase__ : Tuple ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been a int and >= 0"
SCREAMING_SNAKE_CASE_ : Optional[Any] = 1 # this will be return.
for factor in range(1 , n + 1 ):
ans *= factor
return ans
def _lowerCamelCase( lowerCAmelCase__ : Tuple ):
'''simple docstring'''
assert isinstance(lowerCAmelCase__ , lowerCAmelCase__ ) and (n >= 0), "'n' must been an int and >= 0"
SCREAMING_SNAKE_CASE_ : List[Any] = 0
SCREAMING_SNAKE_CASE_ : Union[str, Any] = 1
SCREAMING_SNAKE_CASE_ : int = 1 # this will be return
for _ in range(n - 1 ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ans
ans += fiba
SCREAMING_SNAKE_CASE_ : List[str] = tmp
return ans | 97 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import torch
from packaging import version
from torch.onnx import export
from diffusers import AutoencoderKL
__UpperCamelCase : Union[str, Any] = version.parse(version.parse(torch.__version__).base_version) < version.parse('''1.11''')
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : Optional[int] , _UpperCAmelCase : tuple , _UpperCAmelCase : Path , _UpperCAmelCase : List[str] , _UpperCAmelCase : List[Any] , _UpperCAmelCase : Optional[Any] , _UpperCAmelCase : Tuple , _UpperCAmelCase : Tuple=False , ):
output_path.parent.mkdir(parents=_UpperCAmelCase , exist_ok=_UpperCAmelCase )
# PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
# so we check the torch version for backwards compatibility
if is_torch_less_than_1_11:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , use_external_data_format=_UpperCAmelCase , enable_onnx_checker=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
else:
export(
_UpperCAmelCase , _UpperCAmelCase , f=output_path.as_posix() , input_names=_UpperCAmelCase , output_names=_UpperCAmelCase , dynamic_axes=_UpperCAmelCase , do_constant_folding=_UpperCAmelCase , opset_version=_UpperCAmelCase , )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE (_UpperCAmelCase : str , _UpperCAmelCase : str , _UpperCAmelCase : int , _UpperCAmelCase : bool = False ):
lowerCAmelCase = torch.floataa if fpaa else torch.floataa
if fpaa and torch.cuda.is_available():
lowerCAmelCase = 'cuda'
elif fpaa and not torch.cuda.is_available():
raise ValueError('`float16` model export is only supported on GPUs with CUDA' )
else:
lowerCAmelCase = 'cpu'
lowerCAmelCase = Path(_UpperCAmelCase )
# VAE DECODER
lowerCAmelCase = AutoencoderKL.from_pretrained(model_path + '/vae' )
lowerCAmelCase = vae_decoder.config.latent_channels
# forward only through the decoder part
lowerCAmelCase = vae_decoder.decode
onnx_export(
_UpperCAmelCase , model_args=(
torch.randn(1 , _UpperCAmelCase , 25 , 25 ).to(device=_UpperCAmelCase , dtype=_UpperCAmelCase ),
False,
) , output_path=output_path / 'vae_decoder' / 'model.onnx' , ordered_input_names=['latent_sample', 'return_dict'] , output_names=['sample'] , dynamic_axes={
'latent_sample': {0: 'batch', 1: 'channels', 2: 'height', 3: 'width'},
} , opset=_UpperCAmelCase , )
del vae_decoder
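    # Note on the dummy input above: the 1 x C x 25 x 25 latent only fixes the
    # traced graph, while the dynamic_axes mapping marks batch, channels, height
    # and width as symbolic, so the exported decoder accepts other latent sizes.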
if __name__ == "__main__":
__UpperCamelCase : Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_path''',
type=str,
required=True,
help='''Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).''',
)
parser.add_argument('''--output_path''', type=str, required=True, help='''Path to the output model.''')
parser.add_argument(
'''--opset''',
default=14,
type=int,
help='''The version of the ONNX operator set to use.''',
)
parser.add_argument('''--fp16''', action='''store_true''', default=False, help='''Export the models in `float16` mode''')
__UpperCamelCase : int = parser.parse_args()
print(args.output_path)
convert_models(args.model_path, args.output_path, args.opset, args.fpaa)
print('''SD: Done: ONNX''')
| 4 |
"""simple docstring"""
import warnings
from typing import Dict
import numpy as np
from ..utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
def sigmoid(_outputs):
    return 1.0 / (1.0 + np.exp(-_outputs))


def softmax(_outputs):
    maxes = np.max(_outputs, axis=-1, keepdims=True)
    shifted_exp = np.exp(_outputs - maxes)
    return shifted_exp / shifted_exp.sum(axis=-1, keepdims=True)
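A quick numeric sanity check for the two helpers above (needs only the numpy import already present):

_logits = np.array([[1.0, 2.0, 3.0]])
assert np.allclose(softmax(_logits).sum(axis=-1), 1.0)
assert np.all((sigmoid(_logits) > 0.0) & (sigmoid(_logits) < 1.0))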
class ClassificationFunction(ExplicitEnum):
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
@add_end_docstrings(
_SCREAMING_SNAKE_CASE , R"""
return_all_scores (`bool`, *optional*, defaults to `False`):
Whether to return all prediction scores or just the one of the predicted class.
function_to_apply (`str`, *optional*, defaults to `\"default\"`):
The function to apply to the model outputs in order to retrieve the scores. Accepts four different values:
- `\"default\"`: if the model has a single label, will apply the sigmoid function on the output. If the model
has several labels, will apply the softmax function on the output.
- `\"sigmoid\"`: Applies the sigmoid function on the output.
- `\"softmax\"`: Applies the softmax function on the output.
- `\"none\"`: Does not apply any function on the output.
""" , )
class TextClassificationPipeline(Pipeline):
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
        )
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, top_k="", **tokenizer_kwargs):
        # Using "" as default argument because we're going to use `top_k=None` in user code to declare
        # "No top_k"
        preprocess_params = tokenizer_kwargs

        postprocess_params = {}
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores

        if isinstance(top_k, int) or top_k is None:
            postprocess_params["top_k"] = top_k
            postprocess_params["_legacy"] = False
        elif return_all_scores is not None:
            warnings.warn(
                "`return_all_scores` is now deprecated,  if want a similar functionality use `top_k=None` instead of"
                " `return_all_scores=True` or `top_k=1` instead of `return_all_scores=False`.",
                UserWarning,
            )
            if return_all_scores:
                postprocess_params["top_k"] = None
            else:
                postprocess_params["top_k"] = 1

        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]

        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        # TODO try and retrieve it in a nicer way from _sanitize_parameters.
        _legacy = "top_k" not in kwargs
        if isinstance(args[0], str) and _legacy:
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        return_tensors = self.framework
        if isinstance(inputs, dict):
            return self.tokenizer(**inputs, return_tensors=return_tensors, **tokenizer_kwargs)
        elif isinstance(inputs, list) and len(inputs) == 1 and isinstance(inputs[0], list) and len(inputs[0]) == 2:
            # It used to be valid to use a list of list of list for text pairs, keeping this path for BC
            return self.tokenizer(
                text=inputs[0][0], text_pair=inputs[0][1], return_tensors=return_tensors, **tokenizer_kwargs
            )
        elif isinstance(inputs, list):
            # This is likely an invalid usage of the pipeline attempting to pass text pairs.
            raise ValueError(
                "The pipeline received invalid inputs, if you are trying to send text pairs, you can try to send a"
                ' dictionary `{"text": "My text", "text_pair": "My pair"}` in order to send a text pair.'
            )
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, top_k=1, _legacy=True):
        # `_legacy` is used to determine if we're running the naked pipeline and in backward
        # compatibility mode, or if running the pipeline with `pipeline(..., top_k=1)` we're running
        # the more natural result containing the list.
        # Default value before `set_parameters`
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE

        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()

        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")

        if top_k == 1 and _legacy:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}

        dict_scores = [
            {"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)
        ]
        if not _legacy:
            dict_scores.sort(key=lambda x: x["score"], reverse=True)
            if top_k is not None:
                dict_scores = dict_scores[:top_k]
        return dict_scores
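A usage sketch through the high-level `pipeline` factory; the checkpoint below is the stock sentiment model and can be swapped for any sequence-classification checkpoint:

from transformers import pipeline

classifier = pipeline("text-classification", model="distilbert-base-uncased-finetuned-sst-2-english")
print(classifier("This movie was great!"))              # [{'label': 'POSITIVE', 'score': ...}]
print(classifier("This movie was great!", top_k=None))  # one dict per label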
| 180 | 0 |
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
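A minimal sketch for rendering the scene without the CLI, assuming Manim Community is installed (the usual route is `manim -pql <file>.py Stage1`):

if __name__ == "__main__":
    from manim import config

    config.quality = "low_quality"  # fast preview render
    Stage1().render()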
| 166 |
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class Pix2StructProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "Pix2StructImageProcessor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, image_processor, tokenizer):
        tokenizer.return_token_type_ids = False
        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images=None,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        max_patches: Optional[int] = 2048,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_token_type_ids: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ) -> BatchEncoding:
        if images is None and text is None:
            raise ValueError("You have to specify either images or text.")

        # Get only text
        if images is None and not self.image_processor.is_vqa:
            self.current_processor = self.tokenizer
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )
            return text_encoding

        if not self.image_processor.is_vqa:
            # add pixel_values
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, **kwargs
            )
        else:
            # add pixel_values and bbox
            encoding_image_processor = self.image_processor(
                images, return_tensors=return_tensors, max_patches=max_patches, header_text=text, **kwargs
            )

        if text is not None and not self.image_processor.is_vqa:
            text_encoding = self.tokenizer(
                text=text, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_token_type_ids=return_token_type_ids, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs,
            )

            if "attention_mask" in text_encoding:
                text_encoding["decoder_attention_mask"] = text_encoding.pop("attention_mask")
            if "input_ids" in text_encoding:
                text_encoding["decoder_input_ids"] = text_encoding.pop("input_ids")
        else:
            text_encoding = None

        if text_encoding is not None:
            encoding_image_processor.update(text_encoding)

        return encoding_image_processor

    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
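A usage sketch, assuming the `google/pix2struct-textcaps-base` checkpoint (any Pix2Struct checkpoint shipping this processor should behave the same):

from PIL import Image
from transformers import Pix2StructProcessor

processor = Pix2StructProcessor.from_pretrained("google/pix2struct-textcaps-base")
image = Image.new("RGB", (256, 256), "white")  # stand-in image
inputs = processor(images=image, text="A picture of", return_tensors="pt", max_patches=1024)
print(inputs.keys())  # flattened_patches, attention_mask, plus decoder_* keys when text is given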
| 166 | 1 |
import inspect
import unittest
class DependencyTester(unittest.TestCase):
    def test_diffusers_import(self):
        try:
            import diffusers  # noqa: F401
        except ImportError:
            assert False

    def test_backend_registration(self):
        import diffusers
        from diffusers.dependency_versions_table import deps

        all_classes = inspect.getmembers(diffusers, inspect.isclass)
        for cls_name, cls_module in all_classes:
            if "dummy_" in cls_module.__module__:
                for backend in cls_module._backends:
                    if backend == "k_diffusion":
                        backend = "k-diffusion"
                    elif backend == "invisible_watermark":
                        backend = "invisible-watermark"
                    assert backend in deps, f"{backend} is not in the deps table!"
| 171 |
def solution(min_total: int = 10**12) -> int:
    prev_numerator = 1
    prev_denominator = 0
    numerator = 1
    denominator = 1
while numerator <= 2 * min_total - 1:
prev_numerator += 2 * numerator
numerator += 2 * prev_numerator
prev_denominator += 2 * denominator
denominator += 2 * prev_denominator
return (denominator + 1) // 2
if __name__ == "__main__":
print(F'''{solution() = }''')
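Two checks grounded in the problem statement: 15 blue of 21 total gives P(two blue) = 1/2 exactly, and the next valid arrangement is 85 blue of 120 discs:

from fractions import Fraction

assert Fraction(15, 21) * Fraction(14, 20) == Fraction(1, 2)
assert solution(4) == 15
assert solution(22) == 85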
| 171 | 1 |
import os
import sys
import tempfile
import unittest
import unittest.mock as mock
from pathlib import Path
from huggingface_hub import HfFolder, delete_repo
from huggingface_hub.file_download import http_get
from requests.exceptions import HTTPError
from transformers import (
AlbertTokenizer,
AutoTokenizer,
BertTokenizer,
BertTokenizerFast,
    GPT2TokenizerFast,
is_tokenizers_available,
)
from transformers.testing_utils import TOKEN, USER, is_staging_test, require_tokenizers
from transformers.tokenization_utils import Trie
sys.path.append(str(Path(__file__).parent.parent / 'utils'))
from test_module.custom_tokenization import CustomTokenizer # noqa E402
if is_tokenizers_available():
from test_module.custom_tokenization_fast import CustomTokenizerFast
class TokenizerUtilTester(unittest.TestCase):
    def test_cached_files_are_used_when_internet_is_down(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = BertTokenizer.from_pretrained("hf-internal-testing/tiny-random-bert")
            # This check we did call the fake head request
            mock_head.assert_called()
@require_tokenizers
    def test_cached_files_are_used_when_internet_is_down_missing_files(self):
        # A mock response for an HTTP head request to emulate server down
        response_mock = mock.Mock()
        response_mock.status_code = 500
        response_mock.headers = {}
        response_mock.raise_for_status.side_effect = HTTPError
        response_mock.json.return_value = {}

        # Download this model to make sure it's in the cache.
        _ = GPT2TokenizerFast.from_pretrained("gpt2")

        # Under the mock environment we get a 500 error when trying to reach the tokenizer.
        with mock.patch("requests.Session.request", return_value=response_mock) as mock_head:
            _ = GPT2TokenizerFast.from_pretrained("gpt2")
            # This check we did call the fake head request
            mock_head.assert_called()
    def test_legacy_load_from_one_file(self):
        try:
            tmp_file = tempfile.mktemp()
            with open(tmp_file, "wb") as f:
                http_get("https://huggingface.co/albert-base-v1/resolve/main/spiece.model", f)

            _ = AlbertTokenizer.from_pretrained(tmp_file)
        finally:
            os.remove(tmp_file)

        # Supporting this legacy load introduced a weird bug where the tokenizer would load local files if they are in
        # the current folder and have the right name.
        if os.path.isfile("tokenizer.json"):
            # We skip the test if the user has a `tokenizer.json` in this folder to avoid deleting it.
            return
        try:
            with open("tokenizer.json", "wb") as f:
                http_get("https://huggingface.co/hf-internal-testing/tiny-random-bert/blob/main/tokenizer.json", f)
            tokenizer = AutoTokenizer.from_pretrained("hf-internal-testing/tiny-random-gpt2")
            # The tiny random BERT has a vocab size of 1024, tiny gpt2 as a vocab size of 1000
            self.assertEqual(tokenizer.vocab_size, 1000)
            # Tokenizer should depend on the remote checkpoint, not the local tokenizer.json file.
        finally:
            os.remove("tokenizer.json")

    def test_legacy_load_from_url(self):
        _ = AlbertTokenizer.from_pretrained("https://huggingface.co/albert-base-v1/resolve/main/spiece.model")
@is_staging_test
class TokenizerPushToHubTester(unittest.TestCase):
    vocab_tokens = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']

    @classmethod
    def setUpClass(cls):
        cls._token = TOKEN
        HfFolder.save_token(TOKEN)
@classmethod
    def tearDownClass(cls):
try:
delete_repo(token=cls._token , repo_id="test-tokenizer" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="valid_org/test-tokenizer-org" )
except HTTPError:
pass
try:
delete_repo(token=cls._token , repo_id="test-dynamic-tokenizer" )
except HTTPError:
pass
    def test_push_to_hub(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("test-tokenizer", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="test-tokenizer")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(tmp_dir, repo_id="test-tokenizer", push_to_hub=True, use_auth_token=self._token)

        new_tokenizer = BertTokenizer.from_pretrained(f"{USER}/test-tokenizer")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    def test_push_to_hub_in_organization(self):
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = BertTokenizer(vocab_file)

        tokenizer.push_to_hub("valid_org/test-tokenizer-org", use_auth_token=self._token)
        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

        # Reset repo
        delete_repo(token=self._token, repo_id="valid_org/test-tokenizer-org")

        # Push to hub via save_pretrained
        with tempfile.TemporaryDirectory() as tmp_dir:
            tokenizer.save_pretrained(
                tmp_dir, repo_id="valid_org/test-tokenizer-org", push_to_hub=True, use_auth_token=self._token
            )

        new_tokenizer = BertTokenizer.from_pretrained("valid_org/test-tokenizer-org")
        self.assertDictEqual(new_tokenizer.vocab, tokenizer.vocab)

    @require_tokenizers
    def test_push_to_hub_dynamic_tokenizer(self):
        CustomTokenizer.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))
            tokenizer = CustomTokenizer(vocab_file)

        # No fast custom tokenizer
        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the CustomTokenizer class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")

        # Fast and slow custom tokenizer
        CustomTokenizerFast.register_for_auto_class()
        with tempfile.TemporaryDirectory() as tmp_dir:
            vocab_file = os.path.join(tmp_dir, "vocab.txt")
            with open(vocab_file, "w", encoding="utf-8") as vocab_writer:
                vocab_writer.write("".join([x + "\n" for x in self.vocab_tokens]))

            bert_tokenizer = BertTokenizerFast.from_pretrained(tmp_dir)
            bert_tokenizer.save_pretrained(tmp_dir)
            tokenizer = CustomTokenizerFast.from_pretrained(tmp_dir)

        tokenizer.push_to_hub("test-dynamic-tokenizer", use_auth_token=self._token)

        tokenizer = AutoTokenizer.from_pretrained(f"{USER}/test-dynamic-tokenizer", trust_remote_code=True)
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizerFast")
        tokenizer = AutoTokenizer.from_pretrained(
            f"{USER}/test-dynamic-tokenizer", use_fast=False, trust_remote_code=True
        )
        # Can't make an isinstance check because the new_model.config is from the FakeConfig class of a dynamic module
        self.assertEqual(tokenizer.__class__.__name__, "CustomTokenizer")
class TrieTest(unittest.TestCase):
    def test_trie(self):
        trie = Trie()
        trie.add("Hello 友達")
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {" ": {"友": {"達": {"": 1}}}}}}}}})
        trie.add("Hello")
        trie.data
        self.assertEqual(trie.data, {"H": {"e": {"l": {"l": {"o": {"": 1, " ": {"友": {"達": {"": 1}}}}}}}}})

    def test_trie_split(self):
        trie = Trie()
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS] This is a extra_id_100"])
        trie.add("[CLS]")
        trie.add("extra_id_1")
        trie.add("extra_id_100")
        self.assertEqual(trie.split("[CLS] This is a extra_id_100"), ["[CLS]", " This is a ", "extra_id_100"])

    def test_trie_single(self):
        trie = Trie()
        trie.add("A")
        self.assertEqual(trie.split("ABC"), ["A", "BC"])
        self.assertEqual(trie.split("BCA"), ["BC", "A"])

    def test_trie_final(self):
        trie = Trie()
        trie.add("TOKEN]")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_subtokens(self):
        trie = Trie()
        trie.add("A")
        trie.add("P")
        trie.add("[SPECIAL_TOKEN]")
        self.assertEqual(trie.split("This is something [SPECIAL_TOKEN]"), ["This is something ", "[SPECIAL_TOKEN]"])

    def test_trie_suffix_tokens(self):
        trie = Trie()
        trie.add("AB")
        trie.add("B")
        trie.add("C")
        self.assertEqual(trie.split("ABC"), ["AB", "C"])

    def test_trie_skip(self):
        trie = Trie()
        trie.add("ABC")
        trie.add("B")
        trie.add("CD")
        self.assertEqual(trie.split("ABCD"), ["ABC", "D"])

    def test_cut_text_hardening(self):
        # Even if the offsets are wrong, we necessarily output correct string parts.
        trie = Trie()
        parts = trie.cut_text("ABC", [0, 0, 2, 1, 2, 3])
        self.assertEqual(parts, ["AB", "C"]) | 484 |
from math import pi
def __lowercase( UpperCAmelCase__ , UpperCAmelCase__ ):
"""simple docstring"""
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(9_0, 1_0)) | 484 | 1 |
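A worked check for the snippet above: a 90° arc of a radius-10 circle is a quarter of the circumference, 2π·10/4 = 5π ≈ 15.708:

from math import isclose, pi

assert isclose(arc_length(90, 10), 5 * pi)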
"""simple docstring"""
def euclidean_distance_sqr(point1, point2) -> float:
    return (point1[0] - point2[0]) ** 2 + (point1[1] - point2[1]) ** 2


def column_based_sort(array, column=0) -> list:
    return sorted(array, key=lambda x: x[column])


def dis_between_closest_pair(points, points_counts, min_dis=float("inf")) -> float:
    # brute force over every pair among the first `points_counts` points
    for i in range(points_counts - 1):
        for j in range(i + 1, points_counts):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def dis_between_closest_in_strip(points, points_counts, min_dis=float("inf")) -> float:
    # within the strip, only a handful of neighbours can beat `min_dis`
    for i in range(min(6, points_counts - 1), points_counts):
        for j in range(max(0, i - 6), i):
            current_dis = euclidean_distance_sqr(points[i], points[j])
            if current_dis < min_dis:
                min_dis = current_dis
    return min_dis


def closest_pair_of_points_sqr(points_sorted_on_x, points_sorted_on_y, points_counts) -> float:
    # base case
    if points_counts <= 3:
        return dis_between_closest_pair(points_sorted_on_x, points_counts)

    # recursion
    mid = points_counts // 2
    closest_in_left = closest_pair_of_points_sqr(
        points_sorted_on_x, points_sorted_on_y[:mid], mid)
    closest_in_right = closest_pair_of_points_sqr(
        points_sorted_on_y, points_sorted_on_y[mid:], points_counts - mid)
    closest_pair_dis = min(closest_in_left, closest_in_right)

    # points within `closest_pair_dis` of the dividing vertical line
    cross_strip = []
    for point in points_sorted_on_x:
        if abs(point[0] - points_sorted_on_x[mid][0]) < closest_pair_dis:
            cross_strip.append(point)
    closest_in_strip = dis_between_closest_in_strip(
        cross_strip, len(cross_strip), closest_pair_dis)
    return min(closest_pair_dis, closest_in_strip)


def closest_pair_of_points(points, points_counts) -> float:
    points_sorted_on_x = column_based_sort(points, column=0)
    points_sorted_on_y = column_based_sort(points, column=1)
    return (
        closest_pair_of_points_sqr(
            points_sorted_on_x, points_sorted_on_y, points_counts)
    ) ** 0.5
if __name__ == "__main__":
__A = [(2, 3), (12, 30), (40, 50), (5, 1), (12, 10), (3, 4)]
print("Distance:", closest_pair_of_points(points, len(points)))
| 346 |
"""simple docstring"""
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
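A small check grounded in the sequence itself: 144 is the first three-digit Fibonacci number and the 12th term (the full Project Euler 25 answer, 4782, also pops out of `solution()`, though this quadratic implementation takes noticeably longer to get there):

assert solution(3) == 12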
| 346 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class EfficientFormerImageProcessorTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, num_channels=3, image_size=224, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5]):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }


@require_torch
@require_vision
class EfficientFormerImageProcessorTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_proc_tester = EfficientFormerImageProcessorTester(self)

    @property
    def image_processor_dict(self):
        return self.image_proc_tester.prepare_image_processor_dict()

    def test_image_proc_properties(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processor, "image_mean"))
        self.assertTrue(hasattr(image_processor, "image_std"))
        self.assertTrue(hasattr(image_processor, "do_normalize"))
        self.assertTrue(hasattr(image_processor, "do_resize"))
        self.assertTrue(hasattr(image_processor, "size"))

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processor
        image_processor = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_proc_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processor(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processor(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_proc_tester.batch_size,
                self.image_proc_tester.num_channels,
                self.image_proc_tester.size["height"],
                self.image_proc_tester.size["width"],
            ),
        )
| 714 |
'''simple docstring'''
from manim import *
class Stage1(Scene):
    def construct(self):
        mem = Rectangle(height=0.5, width=0.5)
        fill = Rectangle(height=0.46, width=0.46).set_stroke(width=0)

        cpu_left_col_base = [mem.copy() for i in range(6)]
        cpu_right_col_base = [mem.copy() for i in range(6)]
        cpu_left_col = VGroup(*cpu_left_col_base).arrange(UP, buff=0)
        cpu_right_col = VGroup(*cpu_right_col_base).arrange(UP, buff=0)
        cpu_rects = VGroup(cpu_left_col, cpu_right_col).arrange(RIGHT, buff=0)
        cpu_text = Text("CPU", font_size=24)
        cpu = Group(cpu_rects, cpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        cpu.move_to([-2.5, -0.5, 0])
        self.add(cpu)

        gpu_base = [mem.copy() for i in range(1)]
        gpu_rect = VGroup(*gpu_base).arrange(UP, buff=0)
        gpu_text = Text("GPU", font_size=24)
        gpu = Group(gpu_rect, gpu_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        gpu.align_to(cpu, DOWN)
        gpu.set_x(gpu.get_x() - 1)
        self.add(gpu)

        model_base = [mem.copy() for i in range(6)]
        model_rect = VGroup(*model_base).arrange(RIGHT, buff=0)
        model_text = Text("Model", font_size=24)
        model = Group(model_rect, model_text).arrange(DOWN, buff=0.5, aligned_edge=DOWN)
        model.move_to([3, -1.0, 0])

        self.play(
            Create(cpu_left_col, run_time=1), Create(cpu_right_col, run_time=1), Create(gpu_rect, run_time=1),
        )

        step_1 = MarkupText(
            f"First, an empty model skeleton is loaded\ninto <span fgcolor='{YELLOW}'>memory</span> without using much RAM.", font_size=24,
        )
        key = Square(side_length=2.2)
        key.move_to([-5, 2, 0])
        key_text = MarkupText(
            f"<b>Key:</b>\n\n<span fgcolor='{YELLOW}'>●</span> Empty Model", font_size=18,
        )
        key_text.move_to([-5, 2.4, 0])
        step_1.move_to([2, 2, 0])

        self.play(Write(step_1, run_time=2.5), Write(key_text), Write(key))
        self.add(model)

        cpu_targs = []
        first_animations = []
        second_animations = []
        for i, rect in enumerate(model_base):
            cpu_target = Rectangle(height=0.46, width=0.46).set_stroke(width=0.0).set_fill(YELLOW, opacity=0.7)
            cpu_target.move_to(rect)
            cpu_target.generate_target()
            cpu_target.target.height = 0.46 / 4
            cpu_target.target.width = 0.46 / 3
            if i == 0:
                cpu_target.target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT), buff=0.02, direction=UP)
                cpu_target.target.set_x(cpu_target.target.get_x() + 0.1)
            elif i == 3:
                cpu_target.target.next_to(cpu_targs[0].target, direction=UP, buff=0.0)
            else:
                cpu_target.target.next_to(cpu_targs[i - 1].target, direction=RIGHT, buff=0.0)
            cpu_targs.append(cpu_target)

            first_animations.append(rect.animate(run_time=0.5).set_stroke(YELLOW))
            second_animations.append(MoveToTarget(cpu_target, run_time=1.5))
        self.play(*first_animations)
        self.play(*second_animations)
        self.wait()
| 276 | 0 |
import requests
from bs4 import BeautifulSoup


def get_citation(base_url: str, params: dict) -> str:
    soup = BeautifulSoup(requests.get(base_url, params=params).content, "html.parser")
    div = soup.find("div", attrs={"class": "gs_ri"})
    anchors = div.find("div", attrs={"class": "gs_fl"}).find_all("a")
    return anchors[2].get_text()
if __name__ == "__main__":
lowerCamelCase__ : List[Any] = {
"""title""": (
"""Precisely geometry controlled microsupercapacitors for ultrahigh areal """
"""capacitance, volumetric capacitance, and energy density"""
),
"""journal""": """Chem. Mater.""",
"""volume""": 3_0,
"""pages""": """3979-3990""",
"""year""": 2_0_1_8,
"""hl""": """en""",
}
print(get_citation("""https://scholar.google.com/scholar_lookup""", params=params))
| 33 | import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImg2ImgPipeline, KandinskyPriorPipeline, UNet2DConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class KandinskyImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = KandinskyImg2ImgPipeline
    params = ["prompt", "image_embeds", "negative_image_embeds", "image"]
    batch_params = [
        "prompt",
        "negative_prompt",
        "image_embeds",
        "negative_image_embeds",
        "image",
    ]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "strength",
        "guidance_scale",
        "negative_prompt",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100
@property
    def dummy_tokenizer(self):
        tokenizer = XLMRobertaTokenizerFast.from_pretrained("YiYiXu/tiny-random-mclip-base")
        return tokenizer
@property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = MCLIPConfig(
            numDims=self.cross_attention_dim, transformerDimensions=self.text_embedder_hidden_size, hidden_size=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_hidden_layers=5, vocab_size=1005, )
        text_encoder = MultilingualCLIP(config)
        text_encoder = text_encoder.eval()
        return text_encoder
@property
    def dummy_unet(self):
        torch.manual_seed(0)
        model_kwargs = {
            "in_channels": 4,
            # Out channels is double in channels because predicts mean and variance
            "out_channels": 8,
            "addition_embed_type": "text_image",
            "down_block_types": ("ResnetDownsampleBlock2D", "SimpleCrossAttnDownBlock2D"),
            "up_block_types": ("SimpleCrossAttnUpBlock2D", "ResnetUpsampleBlock2D"),
            "mid_block_type": "UNetMidBlock2DSimpleCrossAttn",
            "block_out_channels": (self.block_out_channels_0, self.block_out_channels_0 * 2),
            "layers_per_block": 1,
            "encoder_hid_dim": self.text_embedder_hidden_size,
            "encoder_hid_dim_type": "text_image_proj",
            "cross_attention_dim": self.cross_attention_dim,
            "attention_head_dim": 4,
            "resnet_time_scale_shift": "scale_shift",
            "class_embed_type": None,
        }
        model = UNet2DConditionModel(**model_kwargs)
        return model
@property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 64],
            "down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": [
                "AttnUpDecoderBlock2D",
                "UpDecoderBlock2D",
            ],
            "vq_embed_dim": 4,
        }
@property
    def dummy_movq(self):
        torch.manual_seed(0)
        model = VQModel(**self.dummy_movq_kwargs)
        return model
    def get_dummy_components(self):
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        unet = self.dummy_unet
        movq = self.dummy_movq

        ddim_config = {
            "num_train_timesteps": 1000,
            "beta_schedule": "linear",
            "beta_start": 0.00085,
            "beta_end": 0.012,
            "clip_sample": False,
            "set_alpha_to_one": False,
            "steps_offset": 0,
            "prediction_type": "epsilon",
            "thresholding": False,
        }

        scheduler = DDIMScheduler(**ddim_config)

        components = {
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "unet": unet,
            "scheduler": scheduler,
            "movq": movq,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed)).to(device)
        negative_image_embeds = floats_tensor((1, self.cross_attention_dim), rng=random.Random(seed + 1)).to(device)
        # create init_image
        image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((256, 256))

        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "horse",
            "image": init_image,
            "image_embeds": image_embeds,
            "negative_image_embeds": negative_image_embeds,
            "generator": generator,
            "height": 64,
            "width": 64,
            "num_inference_steps": 10,
            "guidance_scale": 7.0,
            "strength": 0.2,
            "output_type": "np",
        }
        return inputs
    def test_kandinsky_img2img(self):
        device = "cpu"

        components = self.get_dummy_components()

        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images

        image_from_tuple = pipe(**self.get_dummy_inputs(device), return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)

        expected_slice = np.array(
            [0.61474943, 0.6073539, 0.43308544, 0.5928269, 0.47493595, 0.46755973, 0.4613838, 0.45368797, 0.50119233]
        )
        assert (
            np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < 1e-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class KandinskyImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_kandinsky_img2img(self):
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/kandinsky/kandinsky_img2img_frog.npy"
        )

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/kandinsky/cat.png"
        )
        prompt = "A red cartoon frog, 4k"

        pipe_prior = KandinskyPriorPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1-prior", torch_dtype=torch.float16
        )
        pipe_prior.to(torch_device)

        pipeline = KandinskyImg2ImgPipeline.from_pretrained(
            "kandinsky-community/kandinsky-2-1", torch_dtype=torch.float16
        )
        pipeline = pipeline.to(torch_device)
        pipeline.set_progress_bar_config(disable=None)

        generator = torch.Generator(device="cpu").manual_seed(0)
        image_emb, zero_image_emb = pipe_prior(
            prompt, generator=generator, num_inference_steps=5, negative_prompt="",
        ).to_tuple()

        output = pipeline(
            prompt, image=init_image, image_embeds=image_emb, negative_image_embeds=zero_image_emb, generator=generator, num_inference_steps=100, height=768, width=768, strength=0.2, output_type="np",
        )

        image = output.images[0]

        assert image.shape == (768, 768, 3)

        assert_mean_pixel_difference(image, expected_image)
| 382 | 0 |
"""simple docstring"""
import os
import pytest
from attr import dataclass
UpperCamelCase = """us-east-1""" # defaults region
@dataclass
class SageMakerTestEnvironment:
    framework: str
    role = "arn:aws:iam::558105141721:role/sagemaker_execution_role"
    hyperparameters = {
        "task_name": "mnli",
        "per_device_train_batch_size": 16,
        "per_device_eval_batch_size": 16,
        "do_train": True,
        "do_eval": True,
        "do_predict": True,
        "output_dir": "/opt/ml/model",
        "overwrite_output_dir": True,
        "max_steps": 500,
        "save_steps": 5500,
    }
    distributed_hyperparameters = {**hyperparameters, "max_steps": 1000}

    @property
    def metric_definitions(self) -> str:
        if self.framework == "pytorch":
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
                {"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
            ]
        else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            ]

    @property
    def base_job_name(self) -> str:
        return f"{self.framework}-transfromers-test"

    @property
    def test_path(self) -> str:
        return f"./tests/sagemaker/scripts/{self.framework}"

    @property
    def image_uri(self) -> str:
        if self.framework == "pytorch":
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
        else:
            return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"


@pytest.fixture(scope="class")
def sm_env(request):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework)
| 700 |
"""simple docstring"""
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadV1Processor, SquadV2Processor, squad_convert_examples_to_features


logger = logging.get_logger(__name__)

MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    model_type: str = field(
        default=None, metadata={"help": "Model type selected in the list: " + ", ".join(MODEL_TYPES)}
    )
    data_dir: str = field(
        default=None, metadata={"help": "The input data dir. Should contain the .json files for the SQuAD task."}
    )
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    max_query_length: int = field(
        default=64,
        metadata={
            "help": (
                "The maximum number of tokens for the question. Questions longer than this will "
                "be truncated to this length."
            )
        },
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, the SQuAD examples contain some that do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    n_best_size: int = field(
        default=20, metadata={"help": "If null_score - best_non_null is greater than the threshold predict null."}
    )
    lang_id: int = field(
        default=0,
        metadata={
            "help": (
                "language id of input for language-specific xlm models (see"
                " tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)"
            )
        },
    )
    threads: int = field(default=1, metadata={"help": "multiple threads for converting example to features"})
class Split(Enum):
    train = "train"
    dev = "dev"


class SquadDataset(Dataset):
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool
    def __init__(
        self,
        args: SquadDataTrainingArguments,
        tokenizer: PreTrainedTokenizer,
        limit_length: Optional[int] = None,
        mode: Union[str, Split] = Split.train,
        is_language_sensitive: Optional[bool] = False,
        cache_dir: Optional[str] = None,
        dataset_format: Optional[str] = "pt",
    ):
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        self.processor = SquadV2Processor() if args.version_2_with_negative else SquadV1Processor()
        if isinstance(mode, str):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError("mode is not a valid split name")
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = "v2" if args.version_2_with_negative else "v1"
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir,
            f"cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}",
        )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + ".lock"
        with FileLock(lock_path):
            if os.path.exists(cached_features_file) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file)

                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features["features"]
                self.dataset = self.old_features.get("dataset", None)
                self.examples = self.old_features.get("examples", None)
                logger.info(
                    f"Loading features from cached file {cached_features_file} [took %.3f s]", time.time() - start
                )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f"Deleting cached file {cached_features_file} will allow dataset and examples to be cached in"
                        " future run"
                    )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir)
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir)

                self.features, self.dataset = squad_convert_examples_to_features(
                    examples=self.examples, tokenizer=tokenizer, max_seq_length=args.max_seq_length, doc_stride=args.doc_stride, max_query_length=args.max_query_length, is_training=mode == Split.train, threads=args.threads, return_dataset=dataset_format,
                )

                start = time.time()
                torch.save(
                    {"features": self.features, "dataset": self.dataset, "examples": self.examples},
                    cached_features_file,
                )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f"Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]"
                )
    def __len__(self):
        return len(self.features)
    def __getitem__(self, i) -> Dict[str, torch.Tensor]:
        # Convert to Tensors and build dataset
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids, dtype=torch.long)
        attention_mask = torch.tensor(feature.attention_mask, dtype=torch.long)
        token_type_ids = torch.tensor(feature.token_type_ids, dtype=torch.long)
        cls_index = torch.tensor(feature.cls_index, dtype=torch.long)
        p_mask = torch.tensor(feature.p_mask, dtype=torch.float)
        is_impossible = torch.tensor(feature.is_impossible, dtype=torch.float)

        inputs = {
            "input_ids": input_ids,
            "attention_mask": attention_mask,
            "token_type_ids": token_type_ids,
        }

        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({"cls_index": cls_index, "p_mask": p_mask})
            if self.args.version_2_with_negative:
                inputs.update({"is_impossible": is_impossible})
            if self.is_language_sensitive:
                inputs.update({"langs": (torch.ones(input_ids.shape, dtype=torch.int64) * self.args.lang_id)})

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position, dtype=torch.long)
            end_positions = torch.tensor(feature.end_position, dtype=torch.long)
            inputs.update({"start_positions": start_positions, "end_positions": end_positions})
return inputs
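A usage sketch, assuming SQuAD v1.1 json files sit in a local `./squad` directory (placeholder path):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("bert-base-uncased")
args = SquadDataTrainingArguments(model_type="bert", data_dir="./squad", max_seq_length=384)
train_dataset = SquadDataset(args, tokenizer=tokenizer, mode=Split.train)
print(len(train_dataset), sorted(train_dataset[0].keys()))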
| 612 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
__lowerCAmelCase = logging.get_logger(__name__)
class lowerCamelCase ( __lowerCamelCase ):
UpperCamelCase_ : List[str] = ['pixel_values']
def __init__(
    self,
    do_resize: bool = True,
    size: Optional[Dict[str, int]] = None,
    resample: PILImageResampling = PILImageResampling.BILINEAR,
    do_center_crop: bool = True,
    crop_size: Dict[str, int] = None,
    do_rescale: bool = True,
    rescale_factor: Union[int, float] = 1 / 255,
    do_normalize: bool = True,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    **kwargs,
) -> None:
    super().__init__(**kwargs)
    size = size if size is not None else {"shortest_edge": 256}
    size = get_size_dict(size, default_to_square=False)
    crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
    crop_size = get_size_dict(crop_size, param_name="crop_size")
    self.do_resize = do_resize
    self.size = size
    self.resample = resample
    self.do_center_crop = do_center_crop
    self.crop_size = crop_size
    self.do_rescale = do_rescale
    self.rescale_factor = rescale_factor
    self.do_normalize = do_normalize
    self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
    self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD

def resize(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    resample: PILImageResampling = PILImageResampling.BICUBIC,
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    # Resize so the shortest edge of the image matches size["shortest_edge"].
    size = get_size_dict(size, default_to_square=False)
    if "shortest_edge" not in size:
        raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
    output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
    return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

def center_crop(
    self,
    image: np.ndarray,
    size: Dict[str, int],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    # Center-crop to (size["height"], size["width"]).
    size = get_size_dict(size)
    if "height" not in size or "width" not in size:
        raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
    return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

def rescale(
    self, image: np.ndarray, scale: float, data_format: Optional[Union[str, ChannelDimension]] = None, **kwargs
) -> np.ndarray:
    return rescale(image, scale=scale, data_format=data_format, **kwargs)

def normalize(
    self,
    image: np.ndarray,
    mean: Union[float, List[float]],
    std: Union[float, List[float]],
    data_format: Optional[Union[str, ChannelDimension]] = None,
    **kwargs,
) -> np.ndarray:
    return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

def preprocess(
    self,
    images: ImageInput,
    do_resize: Optional[bool] = None,
    size: Dict[str, int] = None,
    resample: PILImageResampling = None,
    do_center_crop: bool = None,
    crop_size: Dict[str, int] = None,
    do_rescale: Optional[bool] = None,
    rescale_factor: Optional[float] = None,
    do_normalize: Optional[bool] = None,
    image_mean: Optional[Union[float, List[float]]] = None,
    image_std: Optional[Union[float, List[float]]] = None,
    return_tensors: Optional[Union[str, TensorType]] = None,
    data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
    **kwargs,
) -> BatchFeature:
    do_resize = do_resize if do_resize is not None else self.do_resize
    size = size if size is not None else self.size
    size = get_size_dict(size, default_to_square=False)
    resample = resample if resample is not None else self.resample
    do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
    crop_size = crop_size if crop_size is not None else self.crop_size
    crop_size = get_size_dict(crop_size, param_name="crop_size")
    do_rescale = do_rescale if do_rescale is not None else self.do_rescale
    rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
    do_normalize = do_normalize if do_normalize is not None else self.do_normalize
    image_mean = image_mean if image_mean is not None else self.image_mean
    image_std = image_std if image_std is not None else self.image_std
    images = make_list_of_images(images)
    if not valid_images(images):
        raise ValueError(
            "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
            "torch.Tensor, tf.Tensor or jax.ndarray."
        )
    if do_resize and size is None:
        raise ValueError("Size must be specified if do_resize is True.")
    if do_center_crop and crop_size is None:
        raise ValueError("Crop size must be specified if do_center_crop is True.")
    if do_rescale and rescale_factor is None:
        raise ValueError("Rescale factor must be specified if do_rescale is True.")
    if do_normalize and (image_mean is None or image_std is None):
        raise ValueError("Image mean and std must be specified if do_normalize is True.")
    # All transformations expect numpy arrays.
    images = [to_numpy_array(image) for image in images]
    if do_resize:
        images = [self.resize(image=image, size=size, resample=resample) for image in images]
    if do_center_crop:
        images = [self.center_crop(image=image, size=crop_size) for image in images]
    if do_rescale:
        images = [self.rescale(image=image, scale=rescale_factor) for image in images]
    if do_normalize:
        images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
    images = [to_channel_dimension_format(image, data_format) for image in images]
    data = {"pixel_values": images}
    return BatchFeature(data=data, tensor_type=return_tensors)

def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None) -> List[Any]:
    logits = outputs.logits
    # Resize logits and compute semantic segmentation maps
    if target_sizes is not None:
        if len(logits) != len(target_sizes):
            raise ValueError(
                "Make sure that you pass in as many target sizes as the batch dimension of the logits"
            )
        if is_torch_tensor(target_sizes):
            target_sizes = target_sizes.numpy()
        semantic_segmentation = []
        for idx in range(len(logits)):
            resized_logits = torch.nn.functional.interpolate(
                logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
            )
            semantic_map = resized_logits[0].argmax(dim=0)
            semantic_segmentation.append(semantic_map)
    else:
        semantic_segmentation = logits.argmax(dim=1)
        semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
    return semantic_segmentation | 201 |
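# A minimal, self-contained sketch (not part of the original record): it exercises the
# same post-processing logic as `post_process_semantic_segmentation` above on dummy
# logits, so no model weights are needed. The shapes below are assumptions.
import torch

batch, num_labels, h, w = 2, 5, 32, 32
logits = torch.randn(batch, num_labels, h, w)  # stand-in for `outputs.logits`
target_sizes = [(64, 64), (48, 80)]            # one (height, width) per image

maps = []
for idx in range(batch):
    resized = torch.nn.functional.interpolate(
        logits[idx].unsqueeze(0), size=target_sizes[idx], mode="bilinear", align_corners=False
    )
    maps.append(resized[0].argmax(dim=0))      # (H, W) map of predicted class ids

print([tuple(m.shape) for m in maps])          # [(64, 64), (48, 80)]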
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

MGP_STR_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "alibaba-damo/mgp-str-base": "https://huggingface.co/alibaba-damo/mgp-str-base/resolve/main/config.json",
}
class MgpstrConfig(PretrainedConfig):
    model_type = "mgp-str"

    def __init__(
        self,
        image_size=[32, 128],
        patch_size=4,
        num_channels=3,
        max_token_length=27,
        num_character_labels=38,
        num_bpe_labels=50257,
        num_wordpiece_labels=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        mlp_ratio=4.0,
        qkv_bias=True,
        distilled=False,
        layer_norm_eps=1e-5,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.0,
        output_a3_attentions=False,
        initializer_range=0.02,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.max_token_length = max_token_length
        self.num_character_labels = num_character_labels
        self.num_bpe_labels = num_bpe_labels
        self.num_wordpiece_labels = num_wordpiece_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.mlp_ratio = mlp_ratio
        self.distilled = distilled
        self.layer_norm_eps = layer_norm_eps
        self.drop_rate = drop_rate
        self.qkv_bias = qkv_bias
        self.attn_drop_rate = attn_drop_rate
        self.drop_path_rate = drop_path_rate
        self.output_a3_attentions = output_a3_attentions
        self.initializer_range = initializer_range | 201 | 1 |
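# Minimal usage sketch, assuming the usual transformers model class ships alongside
# this config (MgpstrForSceneTextRecognition in the transformers library); kept as
# comments because importing from transformers inside its own config module would
# be circular.
#
#   from transformers import MgpstrConfig, MgpstrForSceneTextRecognition
#   config = MgpstrConfig()            # defaults: 32x128 input, max_token_length=27
#   model = MgpstrForSceneTextRecognition(config)
#   print(config.hidden_size)          # 768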
"""simple docstring"""
import argparse
import json
import os
import numpy as np
import PIL
import requests
import tensorflow.keras.applications.efficientnet as efficientnet
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from tensorflow.keras.preprocessing import image
from transformers import (
EfficientNetConfig,
EfficientNetForImageClassification,
EfficientNetImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

model_classes = {
    "b0": efficientnet.EfficientNetB0,
    "b1": efficientnet.EfficientNetB1,
    "b2": efficientnet.EfficientNetB2,
    "b3": efficientnet.EfficientNetB3,
    "b4": efficientnet.EfficientNetB4,
    "b5": efficientnet.EfficientNetB5,
    "b6": efficientnet.EfficientNetB6,
    "b7": efficientnet.EfficientNetB7,
}

CONFIG_MAP = {
"b0": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.0,
"image_size": 2_2_4,
"dropout_rate": 0.2,
"dw_padding": [],
},
"b1": {
"hidden_dim": 1_2_8_0,
"width_coef": 1.0,
"depth_coef": 1.1,
"image_size": 2_4_0,
"dropout_rate": 0.2,
"dw_padding": [1_6],
},
"b2": {
"hidden_dim": 1_4_0_8,
"width_coef": 1.1,
"depth_coef": 1.2,
"image_size": 2_6_0,
"dropout_rate": 0.3,
"dw_padding": [5, 8, 1_6],
},
"b3": {
"hidden_dim": 1_5_3_6,
"width_coef": 1.2,
"depth_coef": 1.4,
"image_size": 3_0_0,
"dropout_rate": 0.3,
"dw_padding": [5, 1_8],
},
"b4": {
"hidden_dim": 1_7_9_2,
"width_coef": 1.4,
"depth_coef": 1.8,
"image_size": 3_8_0,
"dropout_rate": 0.4,
"dw_padding": [6],
},
"b5": {
"hidden_dim": 2_0_4_8,
"width_coef": 1.6,
"depth_coef": 2.2,
"image_size": 4_5_6,
"dropout_rate": 0.4,
"dw_padding": [1_3, 2_7],
},
"b6": {
"hidden_dim": 2_3_0_4,
"width_coef": 1.8,
"depth_coef": 2.6,
"image_size": 5_2_8,
"dropout_rate": 0.5,
"dw_padding": [3_1],
},
"b7": {
"hidden_dim": 2_5_6_0,
"width_coef": 2.0,
"depth_coef": 3.1,
"image_size": 6_0_0,
"dropout_rate": 0.5,
"dw_padding": [1_8],
},
}
def get_efficientnet_config(model_name):
    config = EfficientNetConfig()
    config.hidden_dim = CONFIG_MAP[model_name]["hidden_dim"]
    config.width_coefficient = CONFIG_MAP[model_name]["width_coef"]
    config.depth_coefficient = CONFIG_MAP[model_name]["depth_coef"]
    config.image_size = CONFIG_MAP[model_name]["image_size"]
    config.dropout_rate = CONFIG_MAP[model_name]["dropout_rate"]
    config.depthwise_padding = CONFIG_MAP[model_name]["dw_padding"]
    repo_id = "huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    config.num_labels = 1000
    id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}
    return config
def prepare_img():
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im


def convert_image_processor(model_name):
    size = CONFIG_MAP[model_name]["image_size"]
    preprocessor = EfficientNetImageProcessor(
        size={"height": size, "width": size}, image_mean=[0.485, 0.456, 0.406], image_std=[0.47853944, 0.4732864, 0.47434163], do_center_crop=False,
    )
    return preprocessor
def rename_keys(original_param_names):
    block_names = [v.split("_")[0].split("block")[1] for v in original_param_names if v.startswith("block")]
    block_names = sorted(set(block_names))
    num_blocks = len(block_names)
    block_name_mapping = {b: str(i) for b, i in zip(block_names, range(num_blocks))}
    rename_keys = []
rename_keys.append(("""stem_conv/kernel:0""", """embeddings.convolution.weight""") )
rename_keys.append(("""stem_bn/gamma:0""", """embeddings.batchnorm.weight""") )
rename_keys.append(("""stem_bn/beta:0""", """embeddings.batchnorm.bias""") )
rename_keys.append(("""stem_bn/moving_mean:0""", """embeddings.batchnorm.running_mean""") )
rename_keys.append(("""stem_bn/moving_variance:0""", """embeddings.batchnorm.running_var""") )
for b in block_names:
        hf_b = block_name_mapping[b]
rename_keys.append((F"block{b}_expand_conv/kernel:0", F"encoder.blocks.{hf_b}.expansion.expand_conv.weight") )
rename_keys.append((F"block{b}_expand_bn/gamma:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.weight") )
rename_keys.append((F"block{b}_expand_bn/beta:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.bias") )
rename_keys.append(
(F"block{b}_expand_bn/moving_mean:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_mean") )
rename_keys.append(
(F"block{b}_expand_bn/moving_variance:0", F"encoder.blocks.{hf_b}.expansion.expand_bn.running_var") )
rename_keys.append(
(F"block{b}_dwconv/depthwise_kernel:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_conv.weight") )
rename_keys.append((F"block{b}_bn/gamma:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.weight") )
rename_keys.append((F"block{b}_bn/beta:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.bias") )
rename_keys.append(
(F"block{b}_bn/moving_mean:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_mean") )
rename_keys.append(
(F"block{b}_bn/moving_variance:0", F"encoder.blocks.{hf_b}.depthwise_conv.depthwise_norm.running_var") )
rename_keys.append((F"block{b}_se_reduce/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.weight") )
rename_keys.append((F"block{b}_se_reduce/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.reduce.bias") )
rename_keys.append((F"block{b}_se_expand/kernel:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.weight") )
rename_keys.append((F"block{b}_se_expand/bias:0", F"encoder.blocks.{hf_b}.squeeze_excite.expand.bias") )
rename_keys.append(
(F"block{b}_project_conv/kernel:0", F"encoder.blocks.{hf_b}.projection.project_conv.weight") )
rename_keys.append((F"block{b}_project_bn/gamma:0", F"encoder.blocks.{hf_b}.projection.project_bn.weight") )
rename_keys.append((F"block{b}_project_bn/beta:0", F"encoder.blocks.{hf_b}.projection.project_bn.bias") )
rename_keys.append(
(F"block{b}_project_bn/moving_mean:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_mean") )
rename_keys.append(
(F"block{b}_project_bn/moving_variance:0", F"encoder.blocks.{hf_b}.projection.project_bn.running_var") )
rename_keys.append(("""top_conv/kernel:0""", """encoder.top_conv.weight""") )
rename_keys.append(("""top_bn/gamma:0""", """encoder.top_bn.weight""") )
rename_keys.append(("""top_bn/beta:0""", """encoder.top_bn.bias""") )
rename_keys.append(("""top_bn/moving_mean:0""", """encoder.top_bn.running_mean""") )
rename_keys.append(("""top_bn/moving_variance:0""", """encoder.top_bn.running_var""") )
    key_mapping = {}
    for item in rename_keys:
        if item[0] in original_param_names:
            key_mapping[item[0]] = "efficientnet." + item[1]
    key_mapping["predictions/kernel:0"] = "classifier.weight"
    key_mapping["predictions/bias:0"] = "classifier.bias"
    return key_mapping
def replace_params(hf_params, tf_params, key_mapping):
    for key, value in tf_params.items():
        if "normalization" in key:
            continue
        hf_key = key_mapping[key]
        if "_conv" in key and "kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(3, 2, 0, 1)
        elif "depthwise_kernel" in key:
            new_hf_value = torch.from_numpy(value).permute(2, 3, 0, 1)
        elif "kernel" in key:
            new_hf_value = torch.from_numpy(np.transpose(value))
        else:
            new_hf_value = torch.from_numpy(value)
        # Replace HF parameters with original TF model parameters
        assert hf_params[hf_key].shape == new_hf_value.shape
        hf_params[hf_key].copy_(new_hf_value)
@torch.no_grad()
def convert_efficientnet_checkpoint(model_name, pytorch_dump_folder_path, save_model, push_to_hub):
    # Load the original TF model
    original_model = model_classes[model_name](
        include_top=True, weights="imagenet", input_tensor=None, input_shape=None, pooling=None, classes=1000, classifier_activation="softmax",
    )
    tf_params = original_model.trainable_variables
    tf_non_train_params = original_model.non_trainable_variables
    tf_params = {param.name: param.numpy() for param in tf_params}
    for param in tf_non_train_params:
        tf_params[param.name] = param.numpy()
    tf_param_names = list(tf_params.keys())
    # Load HuggingFace model
    config = get_efficientnet_config(model_name)
    hf_model = EfficientNetForImageClassification(config).eval()
    hf_params = hf_model.state_dict()
    # Create src-to-dst parameter name mapping dictionary
    print("Converting parameters...")
    key_mapping = rename_keys(tf_param_names)
    replace_params(hf_params, tf_params, key_mapping)
    # Initialize preprocessor and preprocess input image
    preprocessor = convert_image_processor(model_name)
    inputs = preprocessor(images=prepare_img(), return_tensors="pt")
    # HF model inference
    hf_model.eval()
    with torch.no_grad():
        outputs = hf_model(**inputs)
    hf_logits = outputs.logits.detach().numpy()
    # Original model inference
    original_model.trainable = False
    image_size = CONFIG_MAP[model_name]["image_size"]
    img = prepare_img().resize((image_size, image_size), resample=PIL.Image.NEAREST)
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    original_logits = original_model.predict(x)
    # Check whether original and HF model outputs match -> np.allclose
    assert np.allclose(original_logits, hf_logits, atol=1e-3), "The predicted logits are not the same."
    print("Model outputs match!")
    if save_model:
        # Create folder to save model
        if not os.path.isdir(pytorch_dump_folder_path):
            os.mkdir(pytorch_dump_folder_path)
        # Save converted model and image processor
        hf_model.save_pretrained(pytorch_dump_folder_path)
        preprocessor.save_pretrained(pytorch_dump_folder_path)
    if push_to_hub:
        # Push model and image processor to hub
        print(f"Pushing converted {model_name} to the hub...")
        hub_model_name = f"efficientnet-{model_name}"
        preprocessor.push_to_hub(hub_model_name)
        hf_model.push_to_hub(hub_model_name)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="b0",
type=str,
help="Version name of the EfficientNet model you want to convert, select from [b0, b1, b2, b3, b4, b5, b6, b7].",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="hf_model",
type=str,
help="Path to the output PyTorch model directory.",
)
parser.add_argument("--save_model", action="store_true", help="Save model to local")
parser.add_argument("--push_to_hub", action="store_true", help="Push model and image processor to the hub")
    args = parser.parse_args()
convert_efficientnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.save_model, args.push_to_hub)
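    # Usage sketch (paths and script name are illustrative, not from the record):
    #
    #   python convert_efficientnet_to_pytorch.py --model_name b0 \
    #       --pytorch_dump_folder_path hf_model --save_model
    #
    # The saved folder can then be reloaded with the standard transformers API:
    #   from transformers import EfficientNetForImageClassification, EfficientNetImageProcessor
    #   model = EfficientNetForImageClassification.from_pretrained("hf_model")
    #   preprocessor = EfficientNetImageProcessor.from_pretrained("hf_model")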
| 720 |
"""simple docstring"""
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
A: Optional[Any] = "\\n@INPROCEEDINGS{Papineni02bleu:a,\n author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},\n title = {BLEU: a Method for Automatic Evaluation of Machine Translation},\n booktitle = {},\n year = {2002},\n pages = {311--318}\n}\n@inproceedings{lin-och-2004-orange,\n title = \"{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation\",\n author = \"Lin, Chin-Yew and\n Och, Franz Josef\",\n booktitle = \"{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics\",\n month = \"aug 23{--}aug 27\",\n year = \"2004\",\n address = \"Geneva, Switzerland\",\n publisher = \"COLING\",\n url = \"https://www.aclweb.org/anthology/C04-1072\",\n pages = \"501--507\",\n}\n"
A: Optional[int] = "\\nBLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.\nQuality is considered to be the correspondence between a machine's output and that of a human: \"the closer a machine translation is to a professional human translation,\nthe better it is\" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and\nremains one of the most popular automated and inexpensive metrics.\n\nScores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.\nThose scores are then averaged over the whole corpus to reach an estimate of the translation's overall quality. Intelligibility or grammatical correctness\nare not taken into account[citation needed].\n\nBLEU's output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1\nrepresenting more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the\nreference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional\nreference translations will increase the BLEU score.\n"
A: Optional[int] = "\nComputes BLEU score of translated segments against one or more references.\nArgs:\n predictions: list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references: list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 2004 smoothing.\nReturns:\n 'bleu': bleu score,\n 'precisions': geometric mean of n-gram precisions,\n 'brevity_penalty': brevity penalty,\n 'length_ratio': ratio of lengths,\n 'translation_length': translation_length,\n 'reference_length': reference_length\nExamples:\n\n >>> predictions = [\n ... [\"hello\", \"there\", \"general\", \"kenobi\"], # tokenized prediction of the first sample\n ... [\"foo\", \"bar\", \"foobar\"] # tokenized prediction of the second sample\n ... ]\n >>> references = [\n ... [[\"hello\", \"there\", \"general\", \"kenobi\"], [\"hello\", \"there\", \"!\"]], # tokenized references for the first sample (2 references)\n ... [[\"foo\", \"bar\", \"foobar\"]] # tokenized references for the second sample (1 reference)\n ... ]\n >>> bleu = datasets.load_metric(\"bleu\")\n >>> results = bleu.compute(predictions=predictions, references=references)\n >>> print(results[\"bleu\"])\n 1.0\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Bleu(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , codebase_urls=["""https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py"""] , reference_urls=[
"""https://en.wikipedia.org/wiki/BLEU""",
"""https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
] , )
    def _compute(self, predictions, references, max_order=4, smooth=False):
        score = compute_bleu(
            reference_corpus=references, translation_corpus=predictions, max_order=max_order, smooth=smooth
        )
        (bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
}
| 359 | 0 |
"""simple docstring"""
import argparse
import os
from io import BytesIO
from pathlib import Path
import requests
from clip_retrieval.clip_client import ClipClient
from PIL import Image
from tqdm import tqdm
def retrieve(class_prompt, class_data_dir, num_class_images):
    factor = 1.5
    num_images = int(factor * num_class_images)
    client = ClipClient(
        url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1
    )
    os.makedirs(f"{class_data_dir}/images", exist_ok=True)
    if len(list(Path(f"{class_data_dir}/images").iterdir())) >= num_class_images:
        return
    while True:
        class_images = client.query(text=class_prompt)
        if len(class_images) >= factor * num_class_images or num_images > 1e4:
            break
        else:
            num_images = int(factor * num_images)
            client = ClipClient(
                url="https://knn.laion.ai/knn-service", indice_name="laion_400m", num_images=num_images, aesthetic_weight=0.1,
            )
    count = 0
    total = 0
    pbar = tqdm(desc="downloading real regularization images", total=num_class_images)
    with open(f"{class_data_dir}/caption.txt", "w") as f1, open(f"{class_data_dir}/urls.txt", "w") as f2, open(
        f"{class_data_dir}/images.txt", "w"
    ) as f3:
        while total < num_class_images:
            images = class_images[count]
            count += 1
            try:
                img = requests.get(images["url"])
                if img.status_code == 200:
                    _ = Image.open(BytesIO(img.content))  # validate that the bytes decode as an image
                    with open(f"{class_data_dir}/images/{total}.jpg", "wb") as f:
                        f.write(img.content)
                    f1.write(images["caption"] + "\n")
                    f2.write(images["url"] + "\n")
                    f3.write(f"{class_data_dir}/images/{total}.jpg" + "\n")
                    total += 1
                    pbar.update(1)
                else:
                    continue
            except Exception:
                continue
    return


def parse_args():
    parser = argparse.ArgumentParser("", add_help=False)
    parser.add_argument("--class_prompt", help="text prompt to retrieve images", required=True, type=str)
    parser.add_argument("--class_data_dir", help="path to save images", required=True, type=str)
    parser.add_argument("--num_class_images", help="number of images to download", default=200, type=int)
    return parser.parse_args()
if __name__ == "__main__":
    args = parse_args()
retrieve(args.class_prompt, args.class_data_dir, args.num_class_images)
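    # Usage sketch (prompt, paths, and script name are illustrative):
    #
    #   python retrieve.py --class_prompt "photo of a dog" \
    #       --class_data_dir ./real_reg/samples_dog --num_class_images 200
    #
    # This queries the LAION-400M KNN service for images matching the prompt and stores
    # the images, captions, and URLs under the given class data directory.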
| 589 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'configuration_table_transformer': [
'TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP',
'TableTransformerConfig',
'TableTransformerOnnxConfig',
]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_table_transformer"] = [
'TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'TableTransformerForObjectDetection',
'TableTransformerModel',
'TableTransformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TableTransformerConfig,
TableTransformerOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_table_transformer import (
TABLE_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TableTransformerForObjectDetection,
TableTransformerModel,
TableTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
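    # A note on the lazy-import pattern above: importing the package only loads the
    # lightweight configuration module; the torch-backed classes are imported on first
    # attribute access. The module path below is an assumption based on the import
    # structure in this file:
    #
    #   from transformers.models.table_transformer import TableTransformerConfig  # cheap
    #   from transformers.models.table_transformer import TableTransformerModel   # triggers the real import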
| 406 | 0 |
from dataclasses import dataclass, field
from typing import Optional
@dataclass
class TrainingArguments:
    """
    Configuration for training model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be trained."}
    )
    save_dir: Optional[str] = field(
        default="./", metadata={"help": "Save dir where model repo is cloned and models updates are saved to."}
    )
    dataset_name_train: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path of training dataset."}
    )
    dataset_name_valid: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    train_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for training."})
    valid_batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size for evaluation."})
    weight_decay: Optional[float] = field(default=0.1, metadata={"help": "Value of weight decay."})
    shuffle_buffer: Optional[int] = field(
        default=10000, metadata={"help": "Size of buffer used to shuffle streaming dataset."}
    )
    learning_rate: Optional[float] = field(default=2e-4, metadata={"help": "Learning rate for training."})
    lr_scheduler_type: Optional[str] = field(default="cosine", metadata={"help": "Learning rate scheduler type."})
    num_warmup_steps: Optional[int] = field(
        default=750, metadata={"help": "Number of warmup steps in the learning rate schedule."}
    )
    gradient_accumulation_steps: Optional[int] = field(
        default=16, metadata={"help": "Number of gradient accumulation steps."}
    )
    gradient_checkpointing: Optional[bool] = field(
        default=True, metadata={"help": "Use gradient checkpointing to reduce memory footprint."}
    )
    max_train_steps: Optional[int] = field(default=50000, metadata={"help": "Maximum number of training steps."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Sequence lengths used for training."})
    seed: Optional[int] = field(default=1, metadata={"help": "Training seed."})
    save_checkpoint_steps: Optional[int] = field(
        default=1024,
        metadata={"help": "Interval to save checkpoints. Measured as number of forward passes not training steps."},
    )
    resume_from_checkpoint: Optional[str] = field(
        default=None, metadata={"help": "States path if the training should continue from a checkpoint folder."}
    )
    tokenized: Optional[bool] = field(default=False, metadata={"help": "If True the data is pretokenized."})


@dataclass
class EvaluationArguments:
    """
    Configuration for evaluating model.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-valid", metadata={"help": "Name or path of validation dataset."}
    )
    batch_size: Optional[int] = field(default=2, metadata={"help": "Batch size used for evaluation."})
    max_eval_steps: Optional[int] = field(
        default=-1, metadata={"help": "Maximum number of evaluation steps. If -1 the full dataset is evaluated."}
    )
    seq_length: Optional[int] = field(default=1024, metadata={"help": "Length of sequences to be evaluated."})
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})


@dataclass
class HumanEvalArguments:
    """
    Configuration for running evaluation on HumanEval dataset.
    """

    model_ckpt: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Model name or path of model to be evaluated."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for code evaluation."})
    num_tasks: Optional[int] = field(
        default=None,
        metadata={"help": "The number of human-eval tasks to run. If not included all tasks are evaluated."},
    )
    do_sample: Optional[bool] = field(
        default=True, metadata={"help": "Sample from the language model's output distribution."}
    )
    temperature: Optional[float] = field(default=0.2, metadata={"help": "Sampling temperature used for generation."})
    max_new_tokens: Optional[int] = field(default=256, metadata={"help": "Maximum number of newly generated tokens."})
    top_k: Optional[int] = field(default=0, metadata={"help": "Top-k parameter used for generation."})
    top_p: Optional[float] = field(default=0.95, metadata={"help": "Top-p parameter used for nucleus sampling."})
    batch_size: Optional[int] = field(default=10, metadata={"help": "Number of generations to run in parallel."})
    n_samples: Optional[int] = field(
        default=200, metadata={"help": "Number of completions to generate for each sample."}
    )
    seed: Optional[int] = field(default=1, metadata={"help": "Random seed used for evaluation."})
    output_file: Optional[str] = field(
        default="eval_results.json", metadata={"help": "Name of the file to save the evaluation results to."}
    )
    HF_ALLOW_CODE_EVAL: Optional[str] = field(
        default="0", metadata={"help": "Allow `code_eval` to execute Python code on machine"}
    )
    device_int: Optional[int] = field(
        default=-1,
        metadata={
            "help": (
                "Determine which device to run the `text-generation` Pipeline on. -1 is CPU and any zero or positive"
                " number corresponds to which GPU device id to run on."
            )
        },
    )


@dataclass
class PreprocessingArguments:
    """
    Configuration for preprocessing data.
    """

    num_workers: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of CPU cores to use for parallel preprocessing. Default uses the maximum available."
        },
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot", metadata={"help": "Folder or name of dataset to process."}
    )
    output_dir: Optional[str] = field(
        default="codeparrot-clean", metadata={"help": "Folder to save processed dataset."}
    )
    samples_per_file: Optional[int] = field(
        default=100000, metadata={"help": "Number of files to save per JSON output file."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    line_max: Optional[float] = field(
        default=1000, metadata={"help": "Maximum line length in file, otherwise file is filtered."}
    )
    line_mean: Optional[float] = field(
        default=100, metadata={"help": "Maximum mean line length in file, otherwise file is filtered."}
    )
    alpha_frac: Optional[float] = field(
        default=0.25, metadata={"help": "Maximum fraction of non-alphanumeric characters, otherwise file is filtered."}
    )
    min_token_ratio: Optional[float] = field(
        default=1.5, metadata={"help": "Minimum character token ratio for the file, otherwise file is filtered."}
    )
    filter_proba: Optional[float] = field(
        default=0.7, metadata={"help": "Probability for filtering config, test and uncommon files."}
    )
    tokenizer: Optional[str] = field(
        default="codeparrot/codeparrot",
        metadata={"help": "Name or path to the tokenizer."},
    )
    near_deduplication: Optional[bool] = field(
        default=False, metadata={"help": "If True, near-duplicate samples are removed."}
    )
    jaccard_threshold: Optional[float] = field(
        default=0.85, metadata={"help": "Jaccard threshold for near-duplicate samples."}
    )


@dataclass
class TokenizerTrainingArguments:
    """
    Configuration for tokenizer training.
    """

    base_tokenizer: Optional[str] = field(
        default="gpt2", metadata={"help": "Base tokenizer to build new tokenizer from."}
    )
    dataset_name: Optional[str] = field(
        default="transformersbook/codeparrot-train", metadata={"help": "Dataset to train tokenizer on."}
    )
    text_column: Optional[str] = field(default="content", metadata={"help": "Column containing text data to process."})
    n_examples: Optional[int] = field(default=200000, metadata={"help": "Number of examples to train tokenizer on."})
    vocab_size: Optional[int] = field(
        default=32768, metadata={"help": "Vocabulary size of the newly trained tokenizer."}
    )
    tokenizer_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of new tokenizer."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})


@dataclass
class PretokenizationArguments:
    """
    Configuration for data pretokenization.
    """

    tokenizer_dir: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Name or path to the tokenizer."}
    )
    dataset_name: Optional[str] = field(
        default="codeparrot/codeparrot-clean-train", metadata={"help": "Name or path to the dataset to pretokenize."}
    )
    tokenized_data_repo: Optional[str] = field(
        default="tokenized-codeparrot-train", metadata={"help": "Repo name of the pretokenized data."}
    )
    num_workers: Optional[int] = field(default=None, metadata={"help": "Number of workers used for pretokenization."})


@dataclass
class InitializationArguments:
    """
    Configuration for initializing new model.
    """

    config_name: Optional[str] = field(
        default="gpt2-large", metadata={"help": "Configuration to use for model initialization."}
    )
    tokenizer_name: Optional[str] = field(
        default="codeparrot/codeparrot", metadata={"help": "Tokenizer attached to model."}
    )
    model_name: Optional[str] = field(default="codeparrot", metadata={"help": "Name of the created model."})
    push_to_hub: Optional[bool] = field(default=True, metadata={"help": "Push saved tokenizer to the hub."})
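# Minimal sketch of how these dataclasses are consumed: the accompanying scripts parse
# them with transformers' HfArgumentParser. The guard keeps the demo from running on import.
if __name__ == "__main__":
    from transformers import HfArgumentParser

    parser = HfArgumentParser(TrainingArguments)
    (train_args,) = parser.parse_args_into_dataclasses(args=["--train_batch_size", "8"])
    print(train_args.train_batch_size, train_args.learning_rate)  # 8 0.0002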
| 711 |
def manhattan_distance(point_a: list, point_b: list) -> float:
    """Return the Manhattan (L1) distance between two points of equal dimension."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(a - b) for a, b in zip(point_a, point_b)))


def _validate_point(point: list) -> None:
    """Raise TypeError or ValueError if `point` is not a non-empty list of numbers."""
    if point:
        if isinstance(point, list):
            for item in point:
                if not isinstance(item, (int, float)):
                    msg = (
                        "Expected a list of numbers as input, found "
                        f"{type(item).__name__}"
                    )
                    raise TypeError(msg)
        else:
            msg = f"Expected a list of numbers as input, found {type(point).__name__}"
            raise TypeError(msg)
    else:
        raise ValueError("Missing an input")


def manhattan_distance_one_liner(point_a: list, point_b: list) -> float:
    """Same as manhattan_distance, with the sum written as a single comprehension."""
    _validate_point(point_a)
    _validate_point(point_b)
    if len(point_a) != len(point_b):
        raise ValueError("Both points must be in the same n-dimensional space")
    return float(sum(abs(x - y) for x, y in zip(point_a, point_b)))
if __name__ == "__main__":
import doctest
doctest.testmod()
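# Worked examples (values checked by hand):
#   manhattan_distance([1, 1], [2, 2])             -> 2.0
#   manhattan_distance_one_liner([1.5, 2], [3, 0]) -> 3.5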
| 116 | 0 |
import gc
import unittest
import numpy as np
import torch
import torch.nn.functional as F
from transformers import (
ClapTextConfig,
ClapTextModelWithProjection,
RobertaTokenizer,
    SpeechT5HifiGan,
    SpeechT5HifiGanConfig,
)
from diffusers import (
AudioLDMPipeline,
AutoencoderKL,
DDIMScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
    UNet2DConditionModel,
)
from diffusers.utils import is_xformers_available, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_AUDIO_BATCH_PARAMS, TEXT_TO_AUDIO_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class AudioLDMPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = AudioLDMPipeline
    params = TEXT_TO_AUDIO_PARAMS
    batch_params = TEXT_TO_AUDIO_BATCH_PARAMS
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "num_waveforms_per_prompt",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=(32, 64), class_embed_type="simple_projection", projection_class_embeddings_input_dim=32, class_embeddings_concat=True,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=1, out_channels=1, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4,
        )
        torch.manual_seed(0)
        text_encoder_config = ClapTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, projection_dim=32,
        )
        text_encoder = ClapTextModelWithProjection(text_encoder_config)
        tokenizer = RobertaTokenizer.from_pretrained("hf-internal-testing/tiny-random-roberta", model_max_length=77)
        vocoder_config = SpeechT5HifiGanConfig(
            model_in_dim=8, sampling_rate=16000, upsample_initial_channel=16, upsample_rates=[2, 2], upsample_kernel_sizes=[4, 4], resblock_kernel_sizes=[3, 7], resblock_dilation_sizes=[[1, 3, 5], [1, 3, 5]], normalize_before=False,
        )
        vocoder = SpeechT5HifiGan(vocoder_config)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "vocoder": vocoder,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
        }
        return inputs
    def test_audioldm_ddim(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(**inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0050, 0.0050, -0.0060, 0.0033, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0033]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2

    def test_audioldm_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        text_inputs = audioldm_pipe.tokenizer(
            prompt, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
        )
        text_inputs = text_inputs["input_ids"].to(torch_device)
        prompt_embeds = audioldm_pipe.text_encoder(text_inputs)
        prompt_embeds = prompt_embeds.text_embeds
        # additional L_2 normalization over each hidden-state
        prompt_embeds = F.normalize(prompt_embeds, dim=-1)
        inputs["prompt_embeds"] = prompt_embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]
        # forward
        output = audioldm_pipe(**inputs)
        audio_1 = output.audios[0]
        inputs = self.get_dummy_inputs(torch_device)
        prompt = 3 * [inputs.pop("prompt")]
        embeds = []
        for p in [prompt, negative_prompt]:
            text_inputs = audioldm_pipe.tokenizer(
                p, padding="max_length", max_length=audioldm_pipe.tokenizer.model_max_length, truncation=True, return_tensors="pt",
            )
            text_inputs = text_inputs["input_ids"].to(torch_device)
            text_embeds = audioldm_pipe.text_encoder(text_inputs)
            text_embeds = text_embeds.text_embeds
            # additional L_2 normalization over each hidden-state
            text_embeds = F.normalize(text_embeds, dim=-1)
            embeds.append(text_embeds)
        inputs["prompt_embeds"], inputs["negative_prompt_embeds"] = embeds
        # forward
        output = audioldm_pipe(**inputs)
        audio_2 = output.audios[0]
        assert np.abs(audio_1 - audio_2).max() < 1e-2

    def test_audioldm_negative_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        negative_prompt = "egg cracking"
        output = audioldm_pipe(**inputs, negative_prompt=negative_prompt)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) == 256
        audio_slice = audio[:10]
        expected_slice = np.array(
            [-0.0051, 0.0050, -0.0060, 0.0034, -0.0026, 0.0033, -0.0027, 0.0033, -0.0028, 0.0032]
        )
        assert np.abs(audio_slice - expected_slice).max() < 1e-2
    def test_audioldm_num_waveforms_per_prompt(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        components["scheduler"] = PNDMScheduler(skip_prk_steps=True)
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = "A hammer hitting a wooden surface"
        # test num_waveforms_per_prompt=1 (default)
        audios = audioldm_pipe(prompt, num_inference_steps=2).audios
        assert audios.shape == (1, 256)
        # test num_waveforms_per_prompt=1 (default) for batch of prompts
        batch_size = 2
        audios = audioldm_pipe([prompt] * batch_size, num_inference_steps=2).audios
        assert audios.shape == (batch_size, 256)
        # test num_waveforms_per_prompt for single prompt
        num_waveforms_per_prompt = 2
        audios = audioldm_pipe(prompt, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt).audios
        assert audios.shape == (num_waveforms_per_prompt, 256)
        # test num_waveforms_per_prompt for batch of prompts
        batch_size = 2
        audios = audioldm_pipe(
            [prompt] * batch_size, num_inference_steps=2, num_waveforms_per_prompt=num_waveforms_per_prompt
        ).audios
        assert audios.shape == (batch_size * num_waveforms_per_prompt, 256)

    def test_audioldm_audio_length_in_s(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        vocoder_sampling_rate = audioldm_pipe.vocoder.config.sampling_rate
        inputs = self.get_dummy_inputs(device)
        output = audioldm_pipe(audio_length_in_s=0.016, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.016
        output = audioldm_pipe(audio_length_in_s=0.032, **inputs)
        audio = output.audios[0]
        assert audio.ndim == 1
        assert len(audio) / vocoder_sampling_rate == 0.032

    def test_audioldm_vocoder_model_in_dim(self):
        components = self.get_dummy_components()
        audioldm_pipe = AudioLDMPipeline(**components)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        prompt = ["hey"]
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        assert audio_shape == (1, 256)
        config = audioldm_pipe.vocoder.config
        config.model_in_dim *= 2
        audioldm_pipe.vocoder = SpeechT5HifiGan(config).to(torch_device)
        output = audioldm_pipe(prompt, num_inference_steps=1)
        audio_shape = output.audios.shape
        # waveform shape is unchanged, we just have 2x the number of mel channels in the spectrogram
        assert audio_shape == (1, 256)

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False)

    def test_inference_batch_single_identical(self):
        self._test_inference_batch_single_identical(test_mean_pixel_difference=False)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False)
@slow
class AudioLDMPipelineSlowTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 8, 128, 16))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "A hammer hitting a wooden surface",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 2.5,
        }
        return inputs

    def test_audioldm(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        inputs["num_inference_steps"] = 25
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[77230:77240]
        expected_slice = np.array(
            [-0.4884, -0.4607, 0.0023, 0.5007, 0.5896, 0.5151, 0.3813, -0.0208, -0.3687, -0.4315]
        )
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 1e-2

    def test_audioldm_lms(self):
        audioldm_pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm")
        audioldm_pipe.scheduler = LMSDiscreteScheduler.from_config(audioldm_pipe.scheduler.config)
        audioldm_pipe = audioldm_pipe.to(torch_device)
        audioldm_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_inputs(torch_device)
        audio = audioldm_pipe(**inputs).audios[0]
        assert audio.ndim == 1
        assert len(audio) == 81920
        audio_slice = audio[27780:27790]
        expected_slice = np.array([-0.2131, -0.0873, -0.0124, -0.0189, 0.0569, 0.1373, 0.1883, 0.2886, 0.3297, 0.2212])
        max_diff = np.abs(expected_slice - audio_slice).max()
        assert max_diff < 3e-2
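# Usage sketch for the pipeline under test (the checkpoint name "cvssp/audioldm" is taken
# from the record above; the dtype/device settings are illustrative assumptions):
#
#   import torch
#   from diffusers import AudioLDMPipeline
#   pipe = AudioLDMPipeline.from_pretrained("cvssp/audioldm", torch_dtype=torch.float16).to("cuda")
#   audio = pipe("A hammer hitting a wooden surface", num_inference_steps=25).audios[0]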
| 63 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNet2DConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = '''
Examples:
```py
>>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline
>>> from diffusers.utils import load_image
>>> import torch
>>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16
... )
>>> pipe_prior.to("cuda")
>>> prompt = "A red cartoon frog, 4k"
>>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)
>>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(
... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16
... )
>>> pipe.to("cuda")
>>> init_image = load_image(
... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
... "/kandinsky/frog.png"
... )
>>> image = pipe(
... image=init_image,
... image_embeds=image_emb,
... negative_image_embeds=zero_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... strength=0.2,
... ).images
>>> image[0].save("red_frog.png")
```
'''
def downscale_height_and_width(height, width, scale_factor=8):
    new_height = height // scale_factor**2
    if height % scale_factor**2 != 0:
        new_height += 1
    new_width = width // scale_factor**2
    if width % scale_factor**2 != 0:
        new_width += 1
    return new_height * scale_factor, new_width * scale_factor


def prepare_image(pil_image, w=512, h=512):
    pil_image = pil_image.resize((w, h), resample=Image.BICUBIC, reducing_gap=1)
    arr = np.array(pil_image.convert("RGB"))
    arr = arr.astype(np.float32) / 127.5 - 1
    arr = np.transpose(arr, [2, 0, 1])
    image = torch.from_numpy(arr).unsqueeze(0)
    return image
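# Worked example for the helper above: downscale_height_and_width rounds the requested
# size up to a multiple of scale_factor**2 and then divides by scale_factor, e.g.
#   downscale_height_and_width(512, 512) -> (64, 64)
#   downscale_height_and_width(300, 300) -> (40, 40)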
class A_ ( _a ):
def __init__( self: Any ,__lowerCAmelCase: UNetaDConditionModel ,__lowerCAmelCase: DDPMScheduler ,__lowerCAmelCase: VQModel ,):
'''simple docstring'''
super().__init__()
self.register_modules(
unet=__lowerCAmelCase ,scheduler=__lowerCAmelCase ,movq=__lowerCAmelCase ,)
_lowerCamelCase : List[str] = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _lowercase ( self: Dict ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: Tuple ):
'''simple docstring'''
_lowerCamelCase : int = min(int(num_inference_steps * strength ) ,__lowerCAmelCase )
_lowerCamelCase : Tuple = max(num_inference_steps - init_timestep ,0 )
_lowerCamelCase : Optional[int] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _lowercase ( self: Optional[int] ,__lowerCAmelCase: Any ,__lowerCAmelCase: Any ,__lowerCAmelCase: Optional[Any] ,__lowerCAmelCase: Union[str, Any] ,__lowerCAmelCase: Optional[int] ,__lowerCAmelCase: List[Any] ,__lowerCAmelCase: List[str]=None ):
'''simple docstring'''
if not isinstance(__lowerCAmelCase ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__lowerCAmelCase )}""" )
_lowerCamelCase : Any = image.to(device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
_lowerCamelCase : Optional[Any] = batch_size * num_images_per_prompt
if image.shape[1] == 4:
_lowerCamelCase : List[Any] = image
else:
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ) and len(__lowerCAmelCase ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(__lowerCAmelCase )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
elif isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : List[Any] = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(__lowerCAmelCase )
]
_lowerCamelCase : Tuple = torch.cat(__lowerCAmelCase ,dim=0 )
else:
_lowerCamelCase : int = self.movq.encode(__lowerCAmelCase ).latent_dist.sample(__lowerCAmelCase )
_lowerCamelCase : int = self.movq.config.scaling_factor * init_latents
_lowerCamelCase : Tuple = torch.cat([init_latents] ,dim=0 )
_lowerCamelCase : Optional[int] = init_latents.shape
_lowerCamelCase : int = randn_tensor(__lowerCAmelCase ,generator=__lowerCAmelCase ,device=__lowerCAmelCase ,dtype=__lowerCAmelCase )
# get latents
_lowerCamelCase : Union[str, Any] = self.scheduler.add_noise(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : str = init_latents
return latents
def _lowercase ( self: List[Any] ,__lowerCAmelCase: Optional[int]=0 ):
'''simple docstring'''
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError("Please install accelerate via `pip install accelerate`" )
_lowerCamelCase : str = torch.device(F"""cuda:{gpu_id}""" )
_lowerCamelCase : Dict = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(__lowerCAmelCase ,__lowerCAmelCase )
def _lowercase ( self: List[Any] ,__lowerCAmelCase: int=0 ):
'''simple docstring'''
if is_accelerate_available() and is_accelerate_version(">=" ,"0.17.0.dev0" ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher." )
_lowerCamelCase : List[str] = torch.device(F"""cuda:{gpu_id}""" )
if self.device.type != "cpu":
self.to("cpu" ,silence_dtype_warnings=__lowerCAmelCase )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
_lowerCamelCase : str = None
for cpu_offloaded_model in [self.unet, self.movq]:
_lowerCamelCase, _lowerCamelCase : str = cpu_offload_with_hook(__lowerCAmelCase ,__lowerCAmelCase ,prev_module_hook=__lowerCAmelCase )
# We'll offload the last model manually.
_lowerCamelCase : int = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _lowercase ( self: Union[str, Any] ):
'''simple docstring'''
if not hasattr(self.unet ,"_hf_hook" ):
return self.device
for module in self.unet.modules():
if (
hasattr(__lowerCAmelCase ,"_hf_hook" )
and hasattr(module._hf_hook ,"execution_device" )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(__lowerCAmelCase )
def __call__( self: Dict ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,__lowerCAmelCase: Union[torch.FloatTensor, List[torch.FloatTensor]] ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 512 ,__lowerCAmelCase: int = 100 ,__lowerCAmelCase: float = 4.0 ,__lowerCAmelCase: float = 0.3 ,__lowerCAmelCase: int = 1 ,__lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None ,__lowerCAmelCase: Optional[str] = "pil" ,__lowerCAmelCase: bool = True ,):
'''simple docstring'''
_lowerCamelCase : Optional[int] = self._execution_device
_lowerCamelCase : Dict = guidance_scale > 1.0
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : int = torch.cat(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Any = image_embeds.shape[0]
if isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : str = torch.cat(__lowerCAmelCase ,dim=0 )
if do_classifier_free_guidance:
_lowerCamelCase : List[str] = image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[int] = negative_image_embeds.repeat_interleave(__lowerCAmelCase ,dim=0 )
_lowerCamelCase : Optional[Any] = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=__lowerCAmelCase )
if not isinstance(__lowerCAmelCase ,__lowerCAmelCase ):
_lowerCamelCase : Tuple = [image]
if not all(isinstance(__lowerCAmelCase ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F"""Input is in incorrect format: {[type(__lowerCAmelCase ) for i in image]}. Currently, we only support PIL image and pytorch tensor""" )
_lowerCamelCase : Union[str, Any] = torch.cat([prepare_image(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ) for i in image] ,dim=0 )
_lowerCamelCase : str = image.to(dtype=image_embeds.dtype ,device=__lowerCAmelCase )
_lowerCamelCase : Tuple = self.movq.encode(__lowerCAmelCase )["latents"]
_lowerCamelCase : List[str] = latents.repeat_interleave(__lowerCAmelCase ,dim=0 )
self.scheduler.set_timesteps(__lowerCAmelCase ,device=__lowerCAmelCase )
_lowerCamelCase, _lowerCamelCase : Optional[Any] = self.get_timesteps(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
_lowerCamelCase : Any = timesteps[:1].repeat(batch_size * num_images_per_prompt )
_lowerCamelCase, _lowerCamelCase : Tuple = downscale_height_and_width(__lowerCAmelCase ,__lowerCAmelCase ,self.movq_scale_factor )
_lowerCamelCase : List[Any] = self.prepare_latents(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,image_embeds.dtype ,__lowerCAmelCase ,__lowerCAmelCase )
for i, t in enumerate(self.progress_bar(__lowerCAmelCase ) ):
# expand the latents if we are doing classifier free guidance
_lowerCamelCase : Union[str, Any] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
_lowerCamelCase : List[str] = {"image_embeds": image_embeds}
_lowerCamelCase : Tuple = self.unet(
sample=__lowerCAmelCase ,timestep=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,added_cond_kwargs=__lowerCAmelCase ,return_dict=__lowerCAmelCase ,)[0]
if do_classifier_free_guidance:
_lowerCamelCase, _lowerCamelCase : Tuple = noise_pred.split(latents.shape[1] ,dim=1 )
_lowerCamelCase, _lowerCamelCase : Dict = noise_pred.chunk(2 )
_lowerCamelCase, _lowerCamelCase : str = variance_pred.chunk(2 )
_lowerCamelCase : Tuple = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
_lowerCamelCase : Any = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,"variance_type" )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
_lowerCamelCase, _lowerCamelCase : Union[str, Any] = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
_lowerCamelCase : Optional[int] = self.scheduler.step(
__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase ,generator=__lowerCAmelCase ,)[0]
# post-processing
_lowerCamelCase : Optional[int] = self.movq.decode(__lowerCAmelCase ,force_not_quantize=__lowerCAmelCase )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F"""Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}""" )
if output_type in ["np", "pil"]:
_lowerCamelCase : Optional[int] = image * 0.5 + 0.5
_lowerCamelCase : str = image.clamp(0 ,1 )
_lowerCamelCase : Optional[int] = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
_lowerCamelCase : str = self.numpy_to_pil(__lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=__lowerCAmelCase ) | 46 | 0 |
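The guidance step in the denoising loop above extrapolates from the unconditional prediction toward the text-conditional one by `guidance_scale`. A minimal numeric sketch of just that line (the tensor values are illustrative, not pipeline outputs):

import torch

noise_pred_uncond = torch.tensor([0.1, 0.2])
noise_pred_text = torch.tensor([0.3, 0.1])
guidance_scale = 4.0
# guided prediction: move `guidance_scale` times the conditional/unconditional gap
noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
print(noise_pred)  # tensor([ 0.9000, -0.2000])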
'''simple docstring'''
def snake_case ( a_ : list , a_ : list , a_ : int , a_ : int , a_ : int ) -> int:
"""simple docstring"""
if index == number_of_items:
return 0
UpperCamelCase_ : List[str] = 0
UpperCamelCase_ : Optional[Any] = 0
UpperCamelCase_ : Tuple = knapsack(a_ , a_ , a_ , a_ , index + 1 )
if weights[index] <= max_weight:
UpperCamelCase_ : Any = values[index] + knapsack(
a_ , a_ , a_ , max_weight - weights[index] , index + 1 )
return max(a_ , a_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 721 |
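The row above collapses every identifier to a placeholder (all five parameters become `a_`), so as printed it is not runnable. A de-obfuscated reading of the recursive 0/1 knapsack, with parameter names inferred from the call sites, is sketched below:

def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    if index == number_of_items:
        return 0
    # value when the current item is skipped
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    ans2 = 0
    if weights[index] <= max_weight:
        # value when the current item is taken, recursing with the remaining capacity
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)

print(knapsack([3, 4, 5], [30, 50, 60], 3, 8, 0))  # -> 90: take the items weighing 3 and 5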
'''simple docstring'''
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
UpperCamelCase ="\\n@misc{wu2016googles,\n title={Google's Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},\n author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey\n and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin\n Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto\n Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and\n Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes\n and Jeffrey Dean},\n year={2016},\n eprint={1609.08144},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"
UpperCamelCase ="\\nThe BLEU score has some undesirable properties when used for single\nsentences, as it was designed to be a corpus measure. We therefore\nuse a slightly different score for our RL experiments which we call\nthe 'GLEU score'. For the GLEU score, we record all sub-sequences of\n1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then\ncompute a recall, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the target (ground truth) sequence,\nand a precision, which is the ratio of the number of matching n-grams\nto the number of total n-grams in the generated output sequence. Then\nGLEU score is simply the minimum of recall and precision. This GLEU\nscore's range is always between 0 (no matches) and 1 (all match) and\nit is symmetrical when switching output and target. According to\nour experiments, GLEU score correlates quite well with the BLEU\nmetric on a corpus level but does not have its drawbacks for our per\nsentence reward objective.\n"
UpperCamelCase ="\\nComputes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.\nInstead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching\ntokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.\n\nArgs:\n predictions (list of str): list of translations to score.\n Each translation should be tokenized into a list of tokens.\n references (list of list of str): list of lists of references for each translation.\n Each reference should be tokenized into a list of tokens.\n min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.\n max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.\n\nReturns:\n 'google_bleu': google_bleu score\n\nExamples:\n Example 1:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.44\n\n Example 2:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.61\n\n Example 3:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 
'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.53\n\n Example 4:\n >>> hyp1 = ['It', 'is', 'a', 'guide', 'to', 'action', 'which',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'always',\n ... 'disobeys', 'the', 'commands', 'of', 'the', 'cat']\n >>> ref1a = ['It', 'is', 'the', 'guiding', 'principle', 'which',\n ... 'guarantees', 'the', 'rubber', 'duck', 'forces', 'never',\n ... 'being', 'under', 'the', 'command', 'of', 'the', 'cat']\n >>> ref1b = ['It', 'is', 'a', 'guide', 'to', 'action', 'that',\n ... 'ensures', 'that', 'the', 'rubber', 'duck', 'will', 'never',\n ... 'heed', 'the', 'cat', 'commands']\n >>> ref1c = ['It', 'is', 'the', 'practical', 'guide', 'for', 'the',\n ... 'rubber', 'duck', 'army', 'never', 'to', 'heed', 'the', 'directions',\n ... 'of', 'the', 'cat']\n\n >>> hyp2 = ['he', 'read', 'the', 'book', 'because', 'he', 'was',\n ... 'interested', 'in', 'world', 'history']\n >>> ref2a = ['he', 'was', 'interested', 'in', 'world', 'history',\n ... 'because', 'he', 'read', 'the', 'book']\n\n >>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]\n >>> hypotheses = [hyp1, hyp2]\n >>> google_bleu = datasets.load_metric(\"google_bleu\")\n >>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)\n >>> print(round(results[\"google_bleu\"], 2))\n 0.4\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION, _KWARGS_DESCRIPTION )
class A ( datasets.Metric ):
"""simple docstring"""
def _UpperCAmelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def _UpperCAmelCase ( self , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase = 1 , __lowerCAmelCase = 4 , ):
return {
"google_bleu": gleu_score.corpus_gleu(
list_of_references=__lowerCAmelCase , hypotheses=__lowerCAmelCase , min_len=__lowerCAmelCase , max_len=__lowerCAmelCase )
}
| 543 | 0 |
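The description above boils GLEU down to min(n-gram recall, n-gram precision) over 1- to 4-grams. A minimal sketch, assuming `nltk` is installed, computes the same score for a single sentence pair directly (the token lists are illustrative):

from nltk.translate import gleu_score

hypothesis = ["the", "cat", "sat", "on", "the", "mat"]
reference = ["the", "cat", "is", "on", "the", "mat"]

# sentence_gleu takes a list of references and one hypothesis;
# the default n-gram orders are min_len=1, max_len=4, as in the metric above
score = gleu_score.sentence_gleu([reference], hypothesis)
print(round(score, 2))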
import argparse
import os
import torch
from transformers import FlavaImageCodebook, FlavaImageCodebookConfig
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Any , snake_case__ :int , snake_case__ :Union[str, Any] , snake_case__ :Optional[int] ) -> Tuple:
_lowercase = s.rsplit(snake_case__ , snake_case__ )
return new.join(snake_case__ )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :List[str] ) -> str:
# encoder.embeddings are double copied in original FLAVA
return sum(param.float().sum() if 'encoder.embeddings' not in key else 0 for key, param in state_dict.items() )
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Union[str, Any] ) -> int:
_lowercase = {}
_lowercase = ['group_1', 'group_2', 'group_3', 'group_4']
for key, value in state_dict.items():
for group_key in group_keys:
if group_key in key:
_lowercase = key.replace(F"""{group_key}.""" , F"""{group_key}.group.""" )
if "res_path" in key:
_lowercase = key.replace('res_path.' , 'res_path.path.' )
if key.endswith('.w' ):
_lowercase = rreplace(snake_case__ , '.w' , '.weight' , 1 )
if key.endswith('.b' ):
_lowercase = rreplace(snake_case__ , '.b' , '.bias' , 1 )
_lowercase = value.float()
return upgrade
@torch.no_grad()
def SCREAMING_SNAKE_CASE__ ( snake_case__ :Optional[int] , snake_case__ :Tuple , snake_case__ :List[str]=None , snake_case__ :List[Any]=True ) -> int:
from dall_e import Encoder
_lowercase = Encoder()
if os.path.exists(snake_case__ ):
_lowercase = torch.load(snake_case__ )
else:
_lowercase = torch.hub.load_state_dict_from_url(snake_case__ )
if isinstance(snake_case__ , snake_case__ ):
_lowercase = ckpt.state_dict()
encoder.load_state_dict(snake_case__ )
if config_path is not None:
_lowercase = FlavaImageCodebookConfig.from_pretrained(snake_case__ )
else:
_lowercase = FlavaImageCodebookConfig()
_lowercase = FlavaImageCodebook(snake_case__ ).eval()
_lowercase = encoder.state_dict()
_lowercase = upgrade_state_dict(snake_case__ )
hf_model.load_state_dict(snake_case__ )
_lowercase = hf_model.state_dict()
_lowercase = count_parameters(snake_case__ )
_lowercase = count_parameters(snake_case__ )
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
if save_checkpoint:
hf_model.save_pretrained(snake_case__ )
else:
return hf_state_dict
if __name__ == "__main__":
snake_case = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
snake_case = parser.parse_args()
convert_dalle_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 67 |
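The `rreplace` helper above relies on `str.rsplit` so that only the right-most occurrence is replaced, which is what lets `.w`/`.b` suffixes be renamed without touching earlier matches in the key. A standalone sketch, with the collapsed local variable restored under an assumed name:

def rreplace(s: str, old: str, new: str, occurrence: int) -> str:
    parts = s.rsplit(old, occurrence)  # split from the right, at most `occurrence` times
    return new.join(parts)

print(rreplace("res_path.path.w", ".w", ".weight", 1))  # -> res_path.path.weight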
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 67 | 1 |
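The try/except above is the optional-dependency guard pattern: when torch/transformers are missing, placeholder objects are imported instead, so the package still imports and only fails when the pipeline is actually used. A simplified standalone sketch (the dummy fallback is reduced to `None` here, unlike the real dummy classes):

class OptionalDependencyNotAvailable(Exception):
    """Raised when an optional backend (here: torch) is missing."""

def is_torch_available() -> bool:
    try:
        import torch  # noqa: F401
        return True
    except ImportError:
        return False

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # the real module imports dummy objects here that raise a helpful error on use
    UniDiffuserPipeline = None
else:
    print("torch found: the real UniDiffuserPipeline would be imported here")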
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )->str:
'''simple docstring'''
A_ : Union[str, Any] = params
A_ : Dict = np.array(_SCREAMING_SNAKE_CASE )
A_ : List[Any] = np.array([len(_SCREAMING_SNAKE_CASE ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self )->str:
'''simple docstring'''
return len(self.lengths )
def _snake_case ( self )->int:
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _snake_case ( self )->Optional[int]:
'''simple docstring'''
A_ : Dict = self.params.max_model_input_size
A_ : Any = self.lengths > max_len
logger.info(F'''Splitting {sum(_SCREAMING_SNAKE_CASE )} too long sequences.''' )
def divide_chunks(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return [l[i : i + n] for i in range(0 , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )]
A_ : int = []
A_ : Any = []
if self.params.mlm:
A_ , A_ : Tuple = self.params.special_tok_ids['''cls_token'''], self.params.special_tok_ids['''sep_token''']
else:
A_ , A_ : str = self.params.special_tok_ids['''bos_token'''], self.params.special_tok_ids['''eos_token''']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
A_ : Tuple = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
A_ : int = np.insert(_SCREAMING_SNAKE_CASE , 0 , _SCREAMING_SNAKE_CASE )
if sub_s[-1] != sep_id:
A_ : List[str] = np.insert(_SCREAMING_SNAKE_CASE , len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
assert len(_SCREAMING_SNAKE_CASE ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(_SCREAMING_SNAKE_CASE )
new_tok_ids.extend(_SCREAMING_SNAKE_CASE )
new_lengths.extend([len(_SCREAMING_SNAKE_CASE ) for l in sub_seqs] )
A_ : List[str] = np.array(_SCREAMING_SNAKE_CASE )
A_ : Any = np.array(_SCREAMING_SNAKE_CASE )
def _snake_case ( self )->Tuple:
'''simple docstring'''
A_ : Tuple = len(self )
A_ : int = self.lengths > 11
A_ : Dict = self.token_ids[indices]
A_ : Optional[int] = self.lengths[indices]
A_ : List[Any] = len(self )
logger.info(F'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _snake_case ( self )->Dict:
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
A_ : Dict = self.params.special_tok_ids['''unk_token''']
A_ : Optional[Any] = len(self )
A_ : str = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
A_ : Optional[Any] = (unk_occs / self.lengths) < 0.5
A_ : Tuple = self.token_ids[indices]
A_ : Optional[int] = self.lengths[indices]
A_ : str = len(self )
logger.info(F'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _snake_case ( self )->str:
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _snake_case ( self , _SCREAMING_SNAKE_CASE )->Optional[Any]:
'''simple docstring'''
A_ : Union[str, Any] = [t[0] for t in batch]
A_ : Tuple = [t[1] for t in batch]
assert len(_SCREAMING_SNAKE_CASE ) == len(_SCREAMING_SNAKE_CASE )
# Max for paddings
A_ : Optional[Any] = max(_SCREAMING_SNAKE_CASE )
# Pad token ids
if self.params.mlm:
A_ : Optional[int] = self.params.special_tok_ids['''pad_token''']
else:
A_ : Optional[int] = self.params.special_tok_ids['''unk_token''']
A_ : Dict = [list(t.astype(_SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(_SCREAMING_SNAKE_CASE )) for t in token_ids]
assert len(tk_ ) == len(_SCREAMING_SNAKE_CASE )
assert all(len(_SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ )
A_ : int = torch.tensor(tk_ ) # (bs, max_seq_len_)
A_ : List[Any] = torch.tensor(_SCREAMING_SNAKE_CASE ) # (bs)
return tk_t, lg_t
| 152 |
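The sequence splitting above cuts over-long token sequences into windows of `max_len - 2` so that a cls/bos token and a sep/eos token can be re-attached to each chunk. The chunking helper on its own, sketched with an illustrative input:

def divide_chunks(seq, n):
    return [seq[i : i + n] for i in range(0, len(seq), n)]

# with max_model_input_size=5, chunks are 5 - 2 = 3 tokens wide,
# leaving room for the two special tokens per chunk
print(divide_chunks(list(range(7)), 3))  # -> [[0, 1, 2], [3, 4, 5], [6]]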
import numpy as np
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return 1 / (1 + np.exp(-vector ))
def _SCREAMING_SNAKE_CASE ( SCREAMING_SNAKE_CASE ):
return vector * sigmoid(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 152 | 1 |
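The two functions above implement the SiLU (swish) activation, silu(x) = x * sigmoid(x), so silu(0) == 0 and silu(x) approaches x for large x. A quick numeric check:

import numpy as np

def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))

x = np.array([-1.0, 0.0, 1.0])
print(x * sigmoid(x))  # approx [-0.2689  0.      0.7311]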
"""simple docstring"""
import qiskit
def lowerCAmelCase_ ( snake_case_ : int = 2 ) ->qiskit.result.counts.Counts:
lowerCamelCase__ : Optional[Any] =qubits
# Using Aer's simulator
lowerCamelCase__ : Dict =qiskit.Aer.get_backend('aer_simulator' )
# Creating a Quantum Circuit acting on the q register
lowerCamelCase__ : List[str] =qiskit.QuantumCircuit(snake_case_ , snake_case_ )
# Adding a H gate on qubit 0 (now q0 in superposition)
circuit.h(0 )
for i in range(1 , snake_case_ ):
# Adding CX (CNOT) gate
circuit.cx(i - 1 , snake_case_ )
# Mapping the quantum measurement to the classical bits
circuit.measure(list(range(snake_case_ ) ) , list(range(snake_case_ ) ) )
# Now measuring any one qubit would affect other qubits to collapse
# their super position and have same state as the measured one.
# Executing the circuit on the simulator
lowerCamelCase__ : Union[str, Any] =qiskit.execute(snake_case_ , snake_case_ , shots=1_0_0_0 )
return job.result().get_counts(snake_case_ )
if __name__ == "__main__":
print(f"""Total count for various states are: {quantum_entanglement(3)}""") | 174 |
"""simple docstring"""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase = pytest.mark.integration
@require_faiss
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =Dataset.from_dict({'filename': ['my_name-train' + '_' + str(lowerCamelCase_ ) for x in np.arange(30 ).tolist()]} )
return dset
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
lowerCamelCase__ : int =dset.map(
lambda lowerCamelCase_ , lowerCamelCase_ : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=lowerCamelCase_ , keep_in_memory=lowerCamelCase_ )
lowerCamelCase__ : Any =dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
lowerCamelCase__ , lowerCamelCase__ : int =dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
lowerCamelCase__ , lowerCamelCase__ : List[str] =dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase_ ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ , lowerCamelCase__ : Tuple =dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(lowerCamelCase_ , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
from elasticsearch import Elasticsearch
lowerCamelCase__ : Dataset =self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Any ={'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
lowerCamelCase__ : List[Any] ={'hits': {'hits': [{'_score': 1, '_id': 29}]}}
lowerCamelCase__ : Dict =Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=lowerCamelCase_ )
lowerCamelCase__ , lowerCamelCase__ : List[str] =dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Optional[int] =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
lowerCamelCase__ : Any =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Dict =1
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] =index.search(lowerCamelCase_ )
self.assertRaises(lowerCamelCase_ , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
lowerCamelCase__ : int =np.eye(5 , dtype=np.floataa )[::-1]
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search_batch(lowerCamelCase_ )
self.assertRaises(lowerCamelCase_ , index.search_batch , queries[0] )
lowerCamelCase__ : List[str] =[scores[0] for scores in total_scores]
lowerCamelCase__ : str =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Optional[int] =FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
lowerCamelCase__ : Union[str, Any] =FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(lowerCamelCase_ ):
lowerCamelCase__ : List[Any] =FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
import faiss
lowerCamelCase__ : Any =faiss.IndexFlat(5 )
lowerCamelCase__ : Any =FaissIndex(custom_index=lowerCamelCase_ )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
import faiss
lowerCamelCase__ : int =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=lowerCamelCase_ ) as tmp_file:
index.save(tmp_file.name )
lowerCamelCase__ : Any =FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
lowerCamelCase__ : Any =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : str =1
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search(lowerCamelCase_ )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCAmelCase_ ( snake_case_ : Dict ) ->int:
import faiss
lowerCamelCase__ : List[str] =FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
lowerCamelCase__ : Optional[int] ='index.faiss'
lowerCamelCase__ : Optional[Any] =f"""mock://{index_name}"""
index.save(snake_case_ , storage_options=mockfs.storage_options )
lowerCamelCase__ : Dict =FaissIndex.load(snake_case_ , storage_options=mockfs.storage_options )
lowerCamelCase__ : List[Any] =np.zeros(5 , dtype=np.floataa )
lowerCamelCase__ : Union[str, Any] =1
lowerCamelCase__ , lowerCamelCase__ : List[str] =index.search(snake_case_ )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class A_ ( A__ ):
"""simple docstring"""
def UpperCAmelCase__ ( self :Tuple ):
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
lowerCamelCase__ : Union[str, Any] =Elasticsearch()
lowerCamelCase__ : int ={'acknowledged': True}
lowerCamelCase__ : Optional[Any] =ElasticSearchIndex(es_client=lowerCamelCase_ )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
lowerCamelCase__ : Union[str, Any] ='foo'
lowerCamelCase__ : Optional[Any] ={'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : List[Any] =index.search(lowerCamelCase_ )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
lowerCamelCase__ : List[str] ='foo'
lowerCamelCase__ : Union[str, Any] ={'hits': {'hits': [{'_score': 1, '_id': 0}]}}
lowerCamelCase__ , lowerCamelCase__ : List[Any] =index.search(lowerCamelCase_ , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
lowerCamelCase__ : List[str] =['foo', 'bar', 'foobar']
lowerCamelCase__ : str ={'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] =index.search_batch(lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =[scores[0] for scores in total_scores]
lowerCamelCase__ : Dict =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCamelCase_ )
# batched queries with timeout
lowerCamelCase__ : str =['foo', 'bar', 'foobar']
lowerCamelCase__ : Any ={'hits': {'hits': [{'_score': 1, '_id': 1}]}}
lowerCamelCase__ , lowerCamelCase__ : Dict =index.search_batch(lowerCamelCase_ , request_timeout=30 )
lowerCamelCase__ : List[str] =[scores[0] for scores in total_scores]
lowerCamelCase__ : int =[indices[0] for indices in total_indices]
self.assertGreater(np.min(lowerCamelCase_ ) , 0 )
self.assertListEqual([1, 1, 1] , lowerCamelCase_ ) | 174 | 1 |
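The `delete=False` pattern commented in the tests above, sketched on its own: keep the temporary file on disk after the with-block so another API can reopen its path (required on Windows), then unlink it manually:

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
    path = tmp_file.name  # the file survives the with-block
# a second writer/reader (e.g. an index save/load) can now open `path`
with open(path, "w") as f:
    f.write("payload")
os.unlink(path)  # manual cleanup replaces delete=True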
'''simple docstring'''
import argparse
import json
import os
from pathlib import Path
import requests
import torch
from transformers import JukeboxConfig, JukeboxModel
from transformers.utils import logging
logging.set_verbosity_info()
a_ = logging.get_logger(__name__)
a_ = "https://openaipublic.azureedge.net/jukebox/models/"
a_ = {
"jukebox-1b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"1b_lyrics/prior_level_2.pth.tar",
],
"jukebox-5b-lyrics": [
"5b/vqvae.pth.tar",
"5b/prior_level_0.pth.tar",
"5b/prior_level_1.pth.tar",
"5b_lyrics/prior_level_2.pth.tar",
],
}
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
if key.endswith(".model.1.bias" ) and len(key.split("." ) ) > 1_0:
snake_case_ : int = key.replace(".model.1.bias", ".conv1d_1.bias" )
elif key.endswith(".model.1.weight" ) and len(key.split("." ) ) > 1_0:
snake_case_ : int = key.replace(".model.1.weight", ".conv1d_1.weight" )
elif key.endswith(".model.3.bias" ) and len(key.split("." ) ) > 1_0:
snake_case_ : Dict = key.replace(".model.3.bias", ".conv1d_2.bias" )
elif key.endswith(".model.3.weight" ) and len(key.split("." ) ) > 1_0:
snake_case_ : List[str] = key.replace(".model.3.weight", ".conv1d_2.weight" )
if "conditioner_blocks.0." in key:
snake_case_ : Optional[Any] = key.replace("conditioner_blocks.0", "conditioner_blocks" )
if "prime_prior" in key:
snake_case_ : Optional[int] = key.replace("prime_prior", "encoder" )
if ".emb." in key and "total" not in key and "absolute" not in key and "relative" not in key:
snake_case_ : str = key.replace(".emb.", "." )
if key.endswith("k" ): # replace vqvae.X.k with vqvae.X.codebook
return key.replace(".k", ".codebook" )
if "y_emb." in key:
return key.replace("y_emb.", "metadata_embedding." )
if "x_emb.emb." in key:
snake_case_ : Tuple = key.replace("0.x_emb.emb", "embed_tokens" )
if "prime_state_ln" in key:
return key.replace("prime_state_ln", "encoder.final_layer_norm" )
if ".ln" in key:
return key.replace(".ln", ".layer_norm" )
if "_ln" in key:
return key.replace("_ln", "_layer_norm" )
if "prime_state_proj" in key:
return key.replace("prime_state_proj", "encoder.proj_in" )
if "prime_x_out" in key:
return key.replace("prime_x_out", "encoder.lm_head" )
if "prior.x_out" in key:
return key.replace("x_out", "fc_proj_out" )
if "x_emb" in key:
return key.replace("x_emb", "embed_tokens" )
return key
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE, __SCREAMING_SNAKE_CASE ):
"""simple docstring"""
snake_case_ : Optional[Any] = {}
import re
snake_case_ : Dict = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
snake_case_ : Optional[int] = re.compile(
r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
snake_case_ : str = re.compile(r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
snake_case_ : Optional[int] = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)" )
snake_case_ : List[str] = re.compile(
r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
snake_case_ : str = re.compile(r"decoders.(\d*).level_blocks.(\d*).model.(\d*).(bias|weight)" )
snake_case_ : Union[str, Any] = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).(bias|weight)" )
snake_case_ : Optional[Any] = re.compile(
r"conditioner_blocks.(\d*).cond.model.(\d*).(\d).model.(\d*).model.(\d*).(bias|weight)" )
snake_case_ : Any = re.compile(r"conditioner_blocks.(\d*).cond.model.(\d*).(bias|weight)" )
for original_key, value in state_dict.items():
# rename vqvae.encoder keys
if re_encoder_block_conv_in.fullmatch(__lowercase ):
snake_case_ : Optional[int] = re_encoder_block_conv_in.match(__lowercase )
snake_case_ : Dict = regex_match.groups()
snake_case_ : List[Any] = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ : Optional[int] = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}'
snake_case_ : Optional[Any] = re_encoder_block_conv_in.sub(__lowercase, __lowercase )
elif re_encoder_block_resnet.fullmatch(__lowercase ):
snake_case_ : Any = re_encoder_block_resnet.match(__lowercase )
snake_case_ : Dict = regex_match.groups()
snake_case_ : Optional[int] = int(groups[2] ) * 2 + int(groups[3] )
snake_case_ : Any = {'1': 1, '3': 2}[groups[-2]]
snake_case_ : Tuple = f'encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.'
snake_case_ : Tuple = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
snake_case_ : str = prefix + resnet_block
snake_case_ : Union[str, Any] = re_encoder_block_resnet.sub(__lowercase, __lowercase )
elif re_encoder_block_proj_out.fullmatch(__lowercase ):
snake_case_ : Union[str, Any] = re_encoder_block_proj_out.match(__lowercase )
snake_case_ : Any = regex_match.groups()
snake_case_ : Optional[Any] = f'encoders.{groups[0]}.level_blocks.{groups[1]}.proj_out.{groups[-1]}'
snake_case_ : Dict = re_encoder_block_proj_out.sub(__lowercase, __lowercase )
# rename vqvae.decoder keys
elif re_decoder_block_conv_out.fullmatch(__lowercase ):
snake_case_ : Union[str, Any] = re_decoder_block_conv_out.match(__lowercase )
snake_case_ : Dict = regex_match.groups()
snake_case_ : List[str] = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ : str = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.{groups[-1]}'
snake_case_ : List[str] = re_decoder_block_conv_out.sub(__lowercase, __lowercase )
elif re_decoder_block_resnet.fullmatch(__lowercase ):
snake_case_ : Tuple = re_decoder_block_resnet.match(__lowercase )
snake_case_ : int = regex_match.groups()
snake_case_ : Tuple = int(groups[2] ) * 2 + int(groups[3] ) - 2
snake_case_ : List[str] = {'1': 1, '3': 2}[groups[-2]]
snake_case_ : List[Any] = f'decoders.{groups[0]}.level_blocks.{groups[1]}.upsample_block.{block_index}.'
snake_case_ : Optional[int] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
snake_case_ : List[str] = prefix + resnet_block
snake_case_ : List[str] = re_decoder_block_resnet.sub(__lowercase, __lowercase )
elif re_decoder_block_proj_in.fullmatch(__lowercase ):
snake_case_ : str = re_decoder_block_proj_in.match(__lowercase )
snake_case_ : Any = regex_match.groups()
snake_case_ : List[Any] = f'decoders.{groups[0]}.level_blocks.{groups[1]}.proj_in.{groups[-1]}'
snake_case_ : str = re_decoder_block_proj_in.sub(__lowercase, __lowercase )
# rename prior cond.model to upsampler.upsample_block and resnet
elif re_prior_cond_conv_out.fullmatch(__lowercase ):
snake_case_ : Tuple = re_prior_cond_conv_out.match(__lowercase )
snake_case_ : List[Any] = regex_match.groups()
snake_case_ : Dict = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ : Union[str, Any] = f'conditioner_blocks.upsampler.upsample_block.{block_index}.{groups[-1]}'
snake_case_ : Optional[Any] = re_prior_cond_conv_out.sub(__lowercase, __lowercase )
elif re_prior_cond_resnet.fullmatch(__lowercase ):
snake_case_ : Optional[Any] = re_prior_cond_resnet.match(__lowercase )
snake_case_ : Optional[int] = regex_match.groups()
snake_case_ : Any = int(groups[1] ) * 2 + int(groups[2] ) - 2
snake_case_ : int = {'1': 1, '3': 2}[groups[-2]]
snake_case_ : Optional[int] = f'conditioner_blocks.upsampler.upsample_block.{block_index}.'
snake_case_ : Optional[int] = f'resnet_block.{groups[-3]}.conv1d_{conv_index}.{groups[-1]}'
snake_case_ : List[str] = prefix + resnet_block
snake_case_ : str = re_prior_cond_resnet.sub(__lowercase, __lowercase )
elif re_prior_cond_proj_in.fullmatch(__lowercase ):
snake_case_ : Any = re_prior_cond_proj_in.match(__lowercase )
snake_case_ : int = regex_match.groups()
snake_case_ : Optional[int] = f'conditioner_blocks.upsampler.proj_in.{groups[-1]}'
snake_case_ : List[Any] = re_prior_cond_proj_in.sub(__lowercase, __lowercase )
# keep original key
else:
snake_case_ : Optional[int] = original_key
snake_case_ : List[Any] = replace_key(__lowercase )
if f'{key_prefix}.{key}' not in model_state_dict or key is None:
print(f'failed converting {original_key} to {key}, does not match' )
# handle missmatched shape
elif value.shape != model_state_dict[f'{key_prefix}.{key}'].shape:
snake_case_ : Optional[int] = model_state_dict[f'{key_prefix}.{key}']
            print(f'{original_key} -> {key} : \nshape {val.shape} and {value.shape}, do not match' )
snake_case_ : Dict = original_key
snake_case_ : str = original_key
snake_case_ : int = value
return new_dict
@torch.no_grad()
def UpperCamelCase_ ( __SCREAMING_SNAKE_CASE=None, __SCREAMING_SNAKE_CASE=None ):
"""simple docstring"""
for file in MODEL_MAPPING[model_name]:
if not os.path.isfile(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}' ):
snake_case_ : Union[str, Any] = requests.get(f'{PREFIX}{file}', allow_redirects=__lowercase )
os.makedirs(f'{pytorch_dump_folder_path}/', exist_ok=__lowercase )
open(f'{pytorch_dump_folder_path}/{file.split("/" )[-1]}', "wb" ).write(r.content )
snake_case_ : List[str] = MODEL_MAPPING[model_name.split("/" )[-1]]
snake_case_ : Any = JukeboxConfig.from_pretrained(__lowercase )
snake_case_ : str = JukeboxModel(__lowercase )
snake_case_ : Optional[Any] = []
snake_case_ : Union[str, Any] = {}
for i, dict_name in enumerate(__lowercase ):
snake_case_ : Tuple = torch.load(f'{pytorch_dump_folder_path}/{dict_name.split("/" )[-1]}' )['model']
snake_case_ : Union[str, Any] = {}
for k in old_dic.keys():
if k.endswith(".b" ):
snake_case_ : Union[str, Any] = old_dic[k]
elif k.endswith(".w" ):
snake_case_ : Dict = old_dic[k]
elif "level_2" not in dict_name and "cond.model." in k:
snake_case_ : Optional[Any] = old_dic[k]
else:
snake_case_ : Optional[int] = old_dic[k]
snake_case_ : Tuple = 'vqvae' if i == 0 else f'priors.{3 - i}'
snake_case_ : Union[str, Any] = fix_jukebox_keys(__lowercase, model.state_dict(), __lowercase, __lowercase )
weight_dict.append(__lowercase )
snake_case_ : Any = weight_dict.pop(0 )
model.vqvae.load_state_dict(__lowercase )
for i in range(len(__lowercase ) ):
model.priors[i].load_state_dict(weight_dict[2 - i] )
Path(__lowercase ).mkdir(exist_ok=__lowercase )
with open(f'{pytorch_dump_folder_path}/mapping.json', "w" ) as txtfile:
json.dump(__lowercase, __lowercase )
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(__lowercase )
return weight_dict
if __name__ == "__main__":
a_ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default="jukebox-5b-lyrics",
type=str,
help="Name of the model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path",
default="jukebox-5b-lyrics-converted",
type=str,
help="Path to the output PyTorch model directory.",
)
a_ = parser.parse_args()
convert_openai_checkpoint(args.model_name, args.pytorch_dump_folder_path)
| 704 |
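Each regex branch above decodes block coordinates from an old checkpoint key and re-encodes them into the new naming scheme. A standalone sketch of the encoder conv_in branch (the key string is illustrative):

import re

re_encoder_block_conv_in = re.compile(
    r"encoders.(\d*).level_blocks.(\d*).model.(\d*).(\d).(bias|weight)"
)
key = "encoders.0.level_blocks.1.model.2.1.weight"
groups = re_encoder_block_conv_in.match(key).groups()
block_index = int(groups[2]) * 2 + int(groups[3])  # 2 * 2 + 1 = 5
new_key = f"encoders.{groups[0]}.level_blocks.{groups[1]}.downsample_block.{block_index}.{groups[-1]}"
print(new_key)  # -> encoders.0.level_blocks.1.downsample_block.5.weight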
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
from typing import Generic, TypeVar
a_ = TypeVar("T")
a_ = TypeVar("U")
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self , lowercase_ , lowercase_):
snake_case_ : Any = key
snake_case_ : List[Any] = val
snake_case_ : DoubleLinkedListNode[T, U] | None = None
snake_case_ : DoubleLinkedListNode[T, U] | None = None
def __repr__( self):
return (
F'Node: key: {self.key}, val: {self.val}, '
F'has next: {bool(self.next)}, has prev: {bool(self.prev)}'
)
class UpperCAmelCase_ ( Generic[T, U] ):
def __init__( self):
snake_case_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ , lowercase_)
snake_case_ : DoubleLinkedListNode[T, U] = DoubleLinkedListNode(lowercase_ , lowercase_)
snake_case_ , snake_case_ : Union[str, Any] = self.rear, self.head
def __repr__( self):
snake_case_ : Dict = ["DoubleLinkedList"]
snake_case_ : Dict = self.head
while node.next is not None:
rep.append(str(lowercase_))
snake_case_ : List[str] = node.next
rep.append(str(self.rear))
return ",\n ".join(lowercase_)
def snake_case__ ( self , lowercase_):
snake_case_ : List[str] = self.rear.prev
# All nodes other than self.head are guaranteed to have non-None previous
assert previous is not None
snake_case_ : Tuple = node
snake_case_ : str = previous
snake_case_ : Optional[Any] = node
snake_case_ : Any = self.rear
def snake_case__ ( self , lowercase_):
if node.prev is None or node.next is None:
return None
snake_case_ : Union[str, Any] = node.next
snake_case_ : Optional[int] = node.prev
snake_case_ : str = None
snake_case_ : int = None
return node
class UpperCAmelCase_ ( Generic[T, U] ):
UpperCAmelCase_ = {}
def __init__( self , lowercase_):
snake_case_ : DoubleLinkedList[T, U] = DoubleLinkedList()
snake_case_ : List[str] = capacity
snake_case_ : Any = 0
snake_case_ : Dict = 0
snake_case_ : Union[str, Any] = 0
snake_case_ : dict[T, DoubleLinkedListNode[T, U]] = {}
def __repr__( self):
return (
F'CacheInfo(hits={self.hits}, misses={self.miss}, '
F'capacity={self.capacity}, current size={self.num_keys})'
)
def __contains__( self , lowercase_):
return key in self.cache
def snake_case__ ( self , lowercase_):
# Note: pythonic interface would throw KeyError rather than return None
if key in self.cache:
self.hits += 1
snake_case_ : DoubleLinkedListNode[T, U] = self.cache[key]
snake_case_ : Optional[Any] = self.list.remove(self.cache[key])
assert node == value_node
# node is guaranteed not None because it is in self.cache
assert node is not None
self.list.add(lowercase_)
return node.val
self.miss += 1
return None
def snake_case__ ( self , lowercase_ , lowercase_):
if key not in self.cache:
if self.num_keys >= self.capacity:
# delete first node (oldest) when over capacity
snake_case_ : Optional[Any] = self.list.head.next
# guaranteed to have a non-None first node when num_keys > 0
# explain to type checker via assertions
assert first_node is not None
assert first_node.key is not None
assert (
self.list.remove(lowercase_) is not None
                ) # node guaranteed to be in list
del self.cache[first_node.key]
self.num_keys -= 1
snake_case_ : Dict = DoubleLinkedListNode(lowercase_ , lowercase_)
self.list.add(self.cache[key])
self.num_keys += 1
else:
# bump node to the end of the list, update value
snake_case_ : List[Any] = self.list.remove(self.cache[key])
assert node is not None # node guaranteed to be in list
snake_case_ : Optional[Any] = value
self.list.add(lowercase_)
@classmethod
def snake_case__ ( cls , lowercase_ = 1_28):
def cache_decorator_inner(lowercase_) -> Callable[..., U]:
def cache_decorator_wrapper(*lowercase_) -> U:
if func not in cls.decorator_function_to_instance_map:
snake_case_ : List[str] = LRUCache(lowercase_)
snake_case_ : Optional[Any] = cls.decorator_function_to_instance_map[func].get(args[0])
if result is None:
snake_case_ : Any = func(*lowercase_)
cls.decorator_function_to_instance_map[func].put(args[0] , lowercase_)
return result
def cache_info() -> LRUCache[T, U]:
return cls.decorator_function_to_instance_map[func]
setattr(lowercase_ , "cache_info" , lowercase_) # noqa: B010
return cache_decorator_wrapper
return cache_decorator_inner
if __name__ == "__main__":
import doctest
doctest.testmod()
| 92 | 0 |
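A usage sketch of the decorator classmethod above, assuming the masked identifiers resolve to `LRUCache` / `decorator` / `cache_info` as in the TheAlgorithms-style source this row mirrors (the cache keys only the first positional argument, so it suits single-argument functions):

@LRUCache.decorator(100)
def fib(num: int) -> int:
    if num < 2:
        return num
    return fib(num - 1) + fib(num - 2)

print(fib(20))           # 6765, with fib(0)..fib(20) each computed once
print(fib.cache_info())  # CacheInfo(hits=..., misses=..., capacity=100, current size=21)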