code stringlengths 86 54.5k | code_codestyle int64 0 371 | style_context stringlengths 87 49.2k | style_context_codestyle int64 0 349 | label int64 0 1 |
|---|---|---|---|---|
import unittest
import numpy as np
from transformers import MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING, TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
from transformers.pipelines import AudioClassificationPipeline, pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_torchaudio,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowercase ( unittest.TestCase ):
    # NOTE(review): identifiers in this block appear machine-mangled — every
    # local assignment targets the same placeholder (`UpperCamelCase`) and
    # several defs repeat the parameter name `A_` (a SyntaxError in Python),
    # so later reads of the intended names (`audio_classifier`, `audioa`,
    # `dataset`, `EXPECTED_OUTPUT`, ...) are unbound as written.  Code is kept
    # byte-for-byte; restore names from the upstream transformers
    # audio-classification pipeline test before running.
    __lowercase : Optional[Any] = MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING
    __lowercase : Dict = TF_MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING

    # Builds an AudioClassificationPipeline and two zero-filled raw waveforms.
    def __UpperCamelCase ( self , A_ , A_ , A_ ) -> Optional[int]:
        """simple docstring"""
        UpperCamelCase = AudioClassificationPipeline(model=A_ , feature_extractor=A_ )
        # test with a raw waveform
        UpperCamelCase = np.zeros((34_000,) )
        UpperCamelCase = np.zeros((14_000,) )
        return audio_classifier, [audioa, audio]

    # Runs the pipeline on the example waveforms and checks the output shape
    # (list of {'score', 'label'} dicts), then defers to run_torchaudio.
    def __UpperCamelCase ( self , A_ , A_ ) -> Union[str, Any]:
        """simple docstring"""
        UpperCamelCase , UpperCamelCase = examples
        UpperCamelCase = audio_classifier(A_ )
        # by default a model is initialized with num_labels=2
        self.assertEqual(
            A_ , [
                {'score': ANY(A_ ), 'label': ANY(A_ )},
                {'score': ANY(A_ ), 'label': ANY(A_ )},
            ] , )
        UpperCamelCase = audio_classifier(A_ , top_k=1 )
        self.assertEqual(
            A_ , [
                {'score': ANY(A_ ), 'label': ANY(A_ )},
            ] , )
        self.run_torchaudio(A_ )

    @require_torchaudio
    def __UpperCamelCase ( self , A_ ) -> Union[str, Any]:
        """simple docstring"""
        import datasets

        # test with a local file
        UpperCamelCase = datasets.load_dataset('hf-internal-testing/librispeech_asr_dummy' , 'clean' , split='validation' )
        UpperCamelCase = dataset[0]['audio']['array']
        UpperCamelCase = audio_classifier(A_ )
        self.assertEqual(
            A_ , [
                {'score': ANY(A_ ), 'label': ANY(A_ )},
                {'score': ANY(A_ ), 'label': ANY(A_ )},
            ] , )

    # Small-model integration test: checks top-4 labels against two accepted
    # expected outputs, for both a bare array and a dict input.
    @require_torch
    def __UpperCamelCase ( self ) -> List[str]:
        """simple docstring"""
        UpperCamelCase = 'anton-l/wav2vec2-random-tiny-classifier'
        UpperCamelCase = pipeline('audio-classification' , model=A_ )
        UpperCamelCase = np.ones((8_000,) )
        UpperCamelCase = audio_classifier(A_ , top_k=4 )
        UpperCamelCase = [
            {'score': 0.0842, 'label': 'no'},
            {'score': 0.0838, 'label': 'up'},
            {'score': 0.0837, 'label': 'go'},
            {'score': 0.0834, 'label': 'right'},
        ]
        UpperCamelCase = [
            {'score': 0.0845, 'label': 'stop'},
            {'score': 0.0844, 'label': 'on'},
            {'score': 0.0841, 'label': 'right'},
            {'score': 0.0834, 'label': 'left'},
        ]
        self.assertIn(nested_simplify(A_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )
        UpperCamelCase = {'array': np.ones((8_000,) ), 'sampling_rate': audio_classifier.feature_extractor.sampling_rate}
        UpperCamelCase = audio_classifier(A_ , top_k=4 )
        self.assertIn(nested_simplify(A_ , decimals=4 ) , [EXPECTED_OUTPUT, EXPECTED_OUTPUT_PT_2] )

    # Large-model (slow) integration test on the SUPERB keyword-spotting task.
    @require_torch
    @slow
    def __UpperCamelCase ( self ) -> int:
        """simple docstring"""
        import datasets

        UpperCamelCase = 'superb/wav2vec2-base-superb-ks'
        UpperCamelCase = pipeline('audio-classification' , model=A_ )
        UpperCamelCase = datasets.load_dataset('anton-l/superb_dummy' , 'ks' , split='test' )
        UpperCamelCase = np.array(dataset[3]['speech'] , dtype=np.floataa )
        UpperCamelCase = audio_classifier(A_ , top_k=4 )
        self.assertEqual(
            nested_simplify(A_ , decimals=3 ) , [
                {'score': 0.981, 'label': 'go'},
                {'score': 0.007, 'label': 'up'},
                {'score': 0.006, 'label': '_unknown_'},
                {'score': 0.001, 'label': 'down'},
            ] , )

    @require_tf
    @unittest.skip('Audio classification is not implemented for TF' )
    def __UpperCamelCase ( self ) -> Any:
        """simple docstring"""
        pass
| 222 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_UpperCAmelCase : List[Any] = logging.get_logger(__name__)
_UpperCAmelCase : int = {"vocab_file": "vocab.txt"}
_UpperCAmelCase : str = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
_UpperCAmelCase : Optional[Any] = {
"openbmb/cpm-ant-10b": 1_024,
}
def A ( lowercase ) -> Dict:
    """Load a newline-separated vocabulary file into a token -> index map.

    Args:
        lowercase: path to a UTF-8 text file with one token per line.

    Returns:
        collections.OrderedDict mapping each token (trailing newline stripped)
        to its zero-based line index, in file order.
    """
    vocab = collections.OrderedDict()
    with open(lowercase , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    # Bug fix: the mangled original enumerated the *path string* and rebound a
    # placeholder local on every line, so `vocab` stayed empty/undefined.
    # Iterate the lines read from the file and index each stripped token.
    for index, token in enumerate(tokens ):
        vocab[token.rstrip('\n' )] = index
    return vocab
class lowercase ( _SCREAMING_SNAKE_CASE ):
    """Greedy longest-match-first wordpiece tokenizer over a fixed vocabulary.

    Restores the names destroyed by automated identifier mangling: the
    original `__init__` declared three parameters all named `A_` (a
    SyntaxError) and never stored them on the instance.  The keyword names
    used at the call site (`WordpieceTokenizer(vocab=..., unk_token=...)`)
    and the attributes read in `tokenize` fix the intended interface.
    """

    def __init__( self , vocab , unk_token="<unk>" , max_input_chars_per_word=200 ) -> None:
        # vocab: mapping of known wordpieces; membership test only.
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word

    def tokenize( self , token ) -> List[str]:
        """Split one word into wordpieces; returns [unk_token] if the word is
        longer than max_input_chars_per_word or when unknown chars force it.

        Renamed from the mangled `__UpperCamelCase`: the caller in this file
        invokes `self.wordpiece_tokenizer.tokenize(...)`.
        """
        chars = list(token )
        if len(chars ) > self.max_input_chars_per_word:
            return [self.unk_token]
        start = 0
        sub_tokens = []
        while start < len(chars ):
            # Try the longest substring starting at `start`, shrinking until
            # one is found in the vocab (greedy longest match).
            end = len(chars )
            cur_substr = None
            while start < end:
                substr = ''.join(chars[start:end] )
                if substr in self.vocab:
                    cur_substr = substr
                    break
                end -= 1
            if cur_substr is None:
                # No prefix matched: emit unk for this character and advance.
                sub_tokens.append(self.unk_token )
                start += 1
            else:
                # Bug fix: the mangled original appended the whole input word
                # here instead of the matched wordpiece.
                sub_tokens.append(cur_substr )
                start = end
        return sub_tokens
class lowercase ( _SCREAMING_SNAKE_CASE ):
    # NOTE(review): CPM-Ant tokenizer shell with machine-mangled identifiers —
    # `__init__` repeats the parameter name `A_` nine times (a SyntaxError),
    # every local/attribute assignment targets the placeholder
    # `UpperCamelCase`, and the sort lambdas declare `A_` but read `x`.
    # Kept byte-for-byte; restore names from the upstream source before use.
    __lowercase : List[str] = VOCAB_FILES_NAMES
    __lowercase : Dict = PRETRAINED_VOCAB_FILES_MAP
    __lowercase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __lowercase : Any = ["input_ids", "attention_mask"]
    __lowercase : Tuple = False

    def __init__( self , A_ , A_="<d>" , A_="</d>" , A_="<s>" , A_="</s>" , A_="<pad>" , A_="<unk>" , A_="</n>" , A_="</_>" , A_="left" , **A_ , ) -> Tuple:
        """simple docstring"""
        requires_backends(self , ['jieba'] )
        super().__init__(
            bod_token=A_ , eod_token=A_ , bos_token=A_ , eos_token=A_ , pad_token=A_ , unk_token=A_ , line_token=A_ , space_token=A_ , padding_side=A_ , **A_ , )
        UpperCamelCase = bod_token
        UpperCamelCase = eod_token
        UpperCamelCase = load_vocab(A_ )
        # Space and newline tokens are remapped from their named placeholders
        # to literal " " / "\n" entries (removed here, re-added on save).
        UpperCamelCase = self.encoder[space_token]
        UpperCamelCase = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
        UpperCamelCase = {v: k for k, v in self.encoder.items()}
        UpperCamelCase = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )

    # Id of the beginning-of-document token.
    @property
    def __UpperCamelCase ( self ) -> Optional[int]:
        """simple docstring"""
        return self.encoder[self.bod_token]

    # Id of the end-of-document token.
    @property
    def __UpperCamelCase ( self ) -> Tuple:
        """simple docstring"""
        return self.encoder[self.eod_token]

    # Id of the newline token.
    @property
    def __UpperCamelCase ( self ) -> Union[str, Any]:
        """simple docstring"""
        return self.encoder["\n"]

    # Vocabulary size.
    @property
    def __UpperCamelCase ( self ) -> int:
        """simple docstring"""
        return len(self.encoder )

    def __UpperCamelCase ( self ) -> List[Any]:
        """simple docstring"""
        return dict(self.encoder , **self.added_tokens_encoder )

    # Segments text with jieba, then wordpiece-tokenizes each segment.
    def __UpperCamelCase ( self , A_ ) -> List[Any]:
        """simple docstring"""
        UpperCamelCase = []
        for x in jieba.cut(A_ , cut_all=A_ ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(A_ ) )
        return output_tokens

    # Drops negative ids and pad/eos/bos before delegating to the base decoder.
    def __UpperCamelCase ( self , A_ , **A_ ) -> Dict:
        """simple docstring"""
        UpperCamelCase = [i for i in token_ids if i >= 0]
        UpperCamelCase = [
            x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
        ]
        return super()._decode(A_ , **A_ )

    def __UpperCamelCase ( self , A_ ) -> int:
        """simple docstring"""
        return token in self.encoder

    def __UpperCamelCase ( self , A_ ) -> str:
        """simple docstring"""
        return "".join(A_ )

    def __UpperCamelCase ( self , A_ ) -> Tuple:
        """simple docstring"""
        return self.encoder.get(A_ , self.encoder.get(self.unk_token ) )

    def __UpperCamelCase ( self , A_ ) -> Optional[Any]:
        """simple docstring"""
        return self.decoder.get(A_ , self.unk_token )

    # Writes the vocabulary to disk, warning on non-consecutive indices.
    def __UpperCamelCase ( self , A_ , A_ = None ) -> Tuple[str]:
        """simple docstring"""
        if os.path.isdir(A_ ):
            UpperCamelCase = os.path.join(
                A_ , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
        else:
            UpperCamelCase = (filename_prefix + '-' if filename_prefix else '') + save_directory
        UpperCamelCase = 0
        if " " in self.encoder:
            UpperCamelCase = self.encoder[' ']
            del self.encoder[" "]
        if "\n" in self.encoder:
            UpperCamelCase = self.encoder['\n']
            del self.encoder["\n"]
        UpperCamelCase = collections.OrderedDict(sorted(self.encoder.items() , key=lambda A_ : x[1] ) )
        with open(A_ , 'w' , encoding='utf-8' ) as writer:
            for token, token_index in self.encoder.items():
                if index != token_index:
                    logger.warning(
                        F'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
                        ' Please check that the vocabulary is not corrupted!' )
                    UpperCamelCase = token_index
                writer.write(token + '\n' )
                index += 1
        return (vocab_file,)

    # Prepends BOS to one or both sequences.
    def __UpperCamelCase ( self , A_ , A_ = None ) -> List[int]:
        """simple docstring"""
        if token_ids_a is None:
            return [self.bos_token_id] + token_ids_a
        return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a

    # Special-tokens mask matching the build-inputs layout above.
    def __UpperCamelCase ( self , A_ , A_ = None , A_ = False ) -> List[int]:
        """simple docstring"""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_a=A_ , token_ids_a=A_ , already_has_special_tokens=A_ )
        if token_ids_a is not None:
            return [1] + ([0] * len(A_ )) + [1] + ([0] * len(A_ ))
        return [1] + ([0] * len(A_ ))
| 222 | 1 |
"""simple docstring"""
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
__lowercase = logging.get_logger(__name__)
__lowercase = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowercase = {
"""vocab_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-ctx_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-ctx_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""vocab_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-question_encoder-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-question_encoder-multiset-base""": (
"""https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""vocab_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""facebook/dpr-reader-single-nq-base""": (
"""https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"""
),
"""facebook/dpr-reader-multiset-base""": (
"""https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"""
),
},
}
__lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": 512,
"""facebook/dpr-ctx_encoder-multiset-base""": 512,
}
__lowercase = {
"""facebook/dpr-question_encoder-single-nq-base""": 512,
"""facebook/dpr-question_encoder-multiset-base""": 512,
}
__lowercase = {
"""facebook/dpr-reader-single-nq-base""": 512,
"""facebook/dpr-reader-multiset-base""": 512,
}
__lowercase = {
"""facebook/dpr-ctx_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-ctx_encoder-multiset-base""": {"""do_lower_case""": True},
}
__lowercase = {
"""facebook/dpr-question_encoder-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-question_encoder-multiset-base""": {"""do_lower_case""": True},
}
__lowercase = {
"""facebook/dpr-reader-single-nq-base""": {"""do_lower_case""": True},
"""facebook/dpr-reader-multiset-base""": {"""do_lower_case""": True},
}
class _A ( _a ):
    """simple docstring"""

    # DPR context-encoder tokenizer shell: only pretrained-resource tables.
    # NOTE(review): the `CONTEXT_ENCODER_PRETRAINED_*` names are not defined
    # in this file (the module-level constants above were mangled to
    # `__lowercase`), so this class body raises NameError as written.
    UpperCAmelCase : Dict = VOCAB_FILES_NAMES
    UpperCAmelCase : Union[str, Any] = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : Optional[int] = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Any = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class _A ( _a ):
    """simple docstring"""

    # DPR question-encoder tokenizer shell: only pretrained-resource tables.
    # NOTE(review): the `QUESTION_ENCODER_PRETRAINED_*` names are not defined
    # in this file under these names (mangled to `__lowercase`) — NameError
    # as written.
    UpperCAmelCase : List[str] = VOCAB_FILES_NAMES
    UpperCAmelCase : Tuple = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : Dict = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Union[str, Any] = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
__lowercase = collections.namedtuple(
"""DPRSpanPrediction""", ["""span_score""", """relevance_score""", """doc_id""", """start_index""", """end_index""", """text"""]
)
__lowercase = collections.namedtuple("""DPRReaderOutput""", ["""start_logits""", """end_logits""", """relevance_logits"""])
__lowercase = R"""
Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
with the format:
```
[CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>
```
Args:
questions (`str` or `List[str]`):
The questions to be encoded. You can specify one question for many passages. In this case, the question
will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
`titles` or `texts`.
titles (`str` or `List[str]`):
The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
texts (`str` or `List[str]`):
The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
Activates and controls padding. Accepts the following values:
- `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
if provided).
- `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided.
- `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
lengths).
truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
Activates and controls truncation. Accepts the following values:
- `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
the maximum acceptable input length for the model if that argument is not provided. This will truncate
token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
of pairs) is provided.
- `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the first
sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
acceptable input length for the model if that argument is not provided. This will only truncate the
second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
- `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
greater than the model maximum admissible input size).
max_length (`int`, *optional*):
Controls the maximum length to use by one of the truncation/padding parameters.
If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
is required by one of the truncation/padding parameters. If the model has no specific maximum input
length (like XLNet) truncation/padding to a maximum length will be deactivated.
return_tensors (`str` or [`~utils.TensorType`], *optional*):
If set, will return tensors instead of list of python integers. Acceptable values are:
- `'tf'`: Return TensorFlow `tf.constant` objects.
- `'pt'`: Return PyTorch `torch.Tensor` objects.
- `'np'`: Return Numpy `np.ndarray` objects.
return_attention_mask (`bool`, *optional*):
Whether or not to return the attention mask. If not set, will return the attention mask according to the
specific tokenizer's default, defined by the `return_outputs` attribute.
[What are attention masks?](../glossary#attention-mask)
Returns:
`Dict[str, List[List[int]]]`: A dictionary with the following keys:
- `input_ids`: List of token ids to be fed to a model.
- `attention_mask`: List of indices specifying which tokens should be attended to by the model.
"""
@add_start_docstrings(_a )
class _A :
    """simple docstring"""

    # NOTE(review): DPR reader tokenizer mixin with machine-mangled
    # identifiers — locals are all rebound to `a`, several defs repeat the
    # parameter name `__UpperCAmelCase` (a SyntaxError), and most call
    # arguments are that same placeholder, so the data flow (titles/texts/
    # questions, passage offsets, span scores) is broken as written.
    # Kept byte-for-byte; restore from the upstream DPR tokenizer.
    def __call__( self : Union[str, Any] , __UpperCAmelCase : Union[str, Any] , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Optional[str] = None , __UpperCAmelCase : Union[bool, str] = False , __UpperCAmelCase : Union[bool, str] = False , __UpperCAmelCase : Optional[int] = None , __UpperCAmelCase : Optional[Union[str, TensorType]] = None , __UpperCAmelCase : Optional[bool] = None , **__UpperCAmelCase : int , ):
        # Plain-tokenizer fallback when no passages are supplied.
        if titles is None and texts is None:
            return super().__call__(
                __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        elif titles is None or texts is None:
            a : List[Any] = titles if texts is None else texts
            return super().__call__(
                __UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , **__UpperCAmelCase , )
        a : Union[str, Any] = titles if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [titles]
        a : str = texts if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [texts]
        a : List[Any] = len(__UpperCAmelCase)
        a : str = questions if not isinstance(__UpperCAmelCase , __UpperCAmelCase) else [questions] * n_passages
        if len(__UpperCAmelCase) != len(__UpperCAmelCase):
            raise ValueError(
                f'''There should be as many titles than texts but got {len(__UpperCAmelCase)} titles and {len(__UpperCAmelCase)} texts.''')
        a : Optional[int] = super().__call__(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase)["input_ids"]
        a : Optional[Any] = super().__call__(__UpperCAmelCase , add_special_tokens=__UpperCAmelCase , padding=__UpperCAmelCase , truncation=__UpperCAmelCase)["input_ids"]
        # Concatenate question+title ids with passage ids, truncating to
        # max_length when truncation is enabled.
        a : str = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(__UpperCAmelCase , __UpperCAmelCase)
            ]
        }
        if return_attention_mask is not False:
            a : int = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            a : Tuple = attention_mask
        return self.pad(__UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , return_tensors=__UpperCAmelCase)

    # Selects the best answer spans across the most relevant passages.
    def __snake_case ( self : Optional[Any] , __UpperCAmelCase : BatchEncoding , __UpperCAmelCase : DPRReaderOutput , __UpperCAmelCase : int = 16 , __UpperCAmelCase : int = 64 , __UpperCAmelCase : int = 4 , ):
        a : List[str] = reader_input["input_ids"]
        a : Any = reader_output[:3]
        a : str = len(__UpperCAmelCase)
        a : List[str] = sorted(range(__UpperCAmelCase) , reverse=__UpperCAmelCase , key=relevance_logits.__getitem__)
        a : List[DPRReaderOutput] = []
        for doc_id in sorted_docs:
            a : Tuple = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            a : Optional[Any] = sequence_ids.index(self.sep_token_id , 2) + 1 # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                a : Optional[int] = sequence_ids.index(self.pad_token_id)
            else:
                a : Optional[int] = len(__UpperCAmelCase)
            a : List[str] = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len] , end_logits=end_logits[doc_id][passage_offset:sequence_len] , max_answer_length=__UpperCAmelCase , top_spans=__UpperCAmelCase , )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] , relevance_score=relevance_logits[doc_id] , doc_id=__UpperCAmelCase , start_index=__UpperCAmelCase , end_index=__UpperCAmelCase , text=self.decode(sequence_ids[start_index : end_index + 1]) , ))
            if len(__UpperCAmelCase) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    # Enumerates candidate (start, end) spans, sorts by score, and keeps the
    # top non-overlapping intervals.
    def __snake_case ( self : int , __UpperCAmelCase : List[int] , __UpperCAmelCase : List[int] , __UpperCAmelCase : int , __UpperCAmelCase : int , ):
        a : str = []
        for start_index, start_score in enumerate(__UpperCAmelCase):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        a : Optional[Any] = sorted(__UpperCAmelCase , key=lambda __UpperCAmelCase: x[1] , reverse=__UpperCAmelCase)
        a : Optional[Any] = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f'''Wrong span indices: [{start_index}:{end_index}]''')
            a : str = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f'''Span is too long: {length} > {max_answer_length}''')
            # Skip spans that overlap any already-chosen interval.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(__UpperCAmelCase) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(_a )
class _A ( _a ,_a ):
    """simple docstring"""

    # DPR reader tokenizer shell: only pretrained-resource tables.
    # NOTE(review): the `READER_PRETRAINED_*` names are not defined in this
    # file under these names (mangled to `__lowercase`) — NameError as written.
    UpperCAmelCase : Optional[int] = VOCAB_FILES_NAMES
    UpperCAmelCase : List[str] = READER_PRETRAINED_VOCAB_FILES_MAP
    UpperCAmelCase : Optional[Any] = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    UpperCAmelCase : Tuple = READER_PRETRAINED_INIT_CONFIGURATION
    UpperCAmelCase : Optional[Any] = ["""input_ids""", """attention_mask"""]
| 361 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowercase = logging.get_logger(__name__)
__lowercase = {
"""EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
# See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class _A ( _a ):
    """GPT-NeoX model configuration.

    Restores the names destroyed by automated identifier mangling: the
    original `__init__` declared every parameter as `__UpperCAmelCase`
    (duplicate parameter names are a SyntaxError) and bound every value to
    the throwaway local `a`, so the attributes read later
    (`self.vocab_size`, `self.rope_scaling`, ...) were never set.
    Parameter names/order follow the conventional GPT-NeoX configuration;
    defaults are taken from the mangled signature, so keyword callers keep
    working.
    """

    UpperCAmelCase : Union[str, Any] = """gpt_neox"""

    def __init__(
        self,
        vocab_size=50432,
        hidden_size=6144,
        num_hidden_layers=44,
        num_attention_heads=64,
        intermediate_size=24576,
        hidden_act="gelu",
        rotary_pct=0.25,
        rotary_emb_base=10000,
        attention_dropout=0.0,
        hidden_dropout=0.0,
        classifier_dropout=0.1,
        max_position_embeddings=2048,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        use_cache=True,
        bos_token_id=0,
        eos_token_id=2,
        tie_word_embeddings=False,
        use_parallel_residual=True,
        rope_scaling=None,
        **kwargs,
    ):
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs)
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()

        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                "The hidden size is not divisble by the number of attention heads! Make sure to update them!")

    def _rope_scaling_validation(self):
        """Validate `rope_scaling`: None, or {"type": "linear"|"dynamic", "factor": float > 1}.

        Renamed from the mangled `__snake_case` to match the
        `self._rope_scaling_validation()` call in `__init__`.
        """
        if self.rope_scaling is None:
            return

        if not isinstance(self.rope_scaling , dict) or len(self.rope_scaling) != 2:
            raise ValueError(
                "`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, "
                f'''got {self.rope_scaling}''')
        rope_scaling_type = self.rope_scaling.get("type" , None)
        rope_scaling_factor = self.rope_scaling.get("factor" , None)
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''')
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''')
| 226 | 0 |
import re
import string
from collections import Counter
import sacrebleu
import sacremoses
from packaging import version
import datasets
UpperCAmelCase : List[Any] = "\n@inproceedings{xu-etal-2016-optimizing,\n title = {Optimizing Statistical Machine Translation for Text Simplification},\n authors={Xu, Wei and Napoles, Courtney and Pavlick, Ellie and Chen, Quanze and Callison-Burch, Chris},\n journal = {Transactions of the Association for Computational Linguistics},\n volume = {4},\n year={2016},\n url = {https://www.aclweb.org/anthology/Q16-1029},\n pages = {401--415\n},\n@inproceedings{post-2018-call,\n title = \"A Call for Clarity in Reporting {BLEU} Scores\",\n author = \"Post, Matt\",\n booktitle = \"Proceedings of the Third Conference on Machine Translation: Research Papers\",\n month = oct,\n year = \"2018\",\n address = \"Belgium, Brussels\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://www.aclweb.org/anthology/W18-6319\",\n pages = \"186--191\",\n}\n"
UpperCAmelCase : List[Any] = "\\nWIKI_SPLIT is the combination of three metrics SARI, EXACT and SACREBLEU\nIt can be used to evaluate the quality of machine-generated texts.\n"
UpperCAmelCase : Tuple = "\nCalculates sari score (between 0 and 100) given a list of source and predicted\nsentences, and a list of lists of reference sentences. It also computes the BLEU score as well as the exact match score.\nArgs:\n sources: list of source sentences where each sentence should be a string.\n predictions: list of predicted sentences where each sentence should be a string.\n references: list of lists of reference sentences where each sentence should be a string.\nReturns:\n sari: sari score\n sacrebleu: sacrebleu score\n exact: exact score\n\nExamples:\n >>> sources=[\"About 95 species are currently accepted .\"]\n >>> predictions=[\"About 95 you now get in .\"]\n >>> references=[[\"About 95 species are currently known .\"]]\n >>> wiki_split = datasets.load_metric(\"wiki_split\")\n >>> results = wiki_split.compute(sources=sources, predictions=predictions, references=references)\n >>> print(results)\n {'sari': 21.805555555555557, 'sacrebleu': 14.535768424205482, 'exact': 0.0}\n"
def __lowerCamelCase ( lowerCamelCase__ : str ) -> str:
    """Normalize an answer string for exact-match comparison.

    Lowercases, strips punctuation, removes English articles (a/an/the) and
    collapses whitespace.  Fixes the mangled original, which discarded the
    compiled article regex and passed the input string itself to `re.sub`
    as the pattern (substituting the string into itself).
    """

    def remove_articles(text ):
        regex = re.compile(R"""\b(a|an|the)\b""" , re.UNICODE )
        return re.sub(regex , """ """ , text )

    def white_space_fix(text ):
        return " ".join(text.split() )

    def remove_punc(text ):
        exclude = set(string.punctuation )
        return "".join(ch for ch in text if ch not in exclude )

    def lower(text ):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase__ ) ) ) )
def __lowerCamelCase ( lowerCamelCase__ : str , lowerCamelCase__ : List[Any] ):
    '''simple docstring'''
    # Exact-match score: 1 if the two normalized answers are equal, else 0.
    # NOTE(review): both parameters were mangled to the same name (a
    # SyntaxError), and `normalize_answer` is not defined under that name in
    # this file (the normalizer above was renamed).  Kept byte-for-byte.
    return int(normalize_answer(lowerCamelCase__ ) == normalize_answer(lowerCamelCase__ ) )
def __lowerCamelCase ( lowerCamelCase__ : Any , lowerCamelCase__ : int ):
    '''simple docstring'''
    # Percentage of predictions that exactly match at least one reference.
    # NOTE(review): duplicate mangled parameter names (SyntaxError) and
    # `compute_exact` is not defined under that name in this file.
    lowerCamelCase = [any(compute_exact(lowerCamelCase__ , lowerCamelCase__ ) for ref in refs ) for pred, refs in zip(lowerCamelCase__ , lowerCamelCase__ )]
    return (sum(lowerCamelCase__ ) / len(lowerCamelCase__ )) * 100
def __lowerCamelCase ( lowerCamelCase__ : int , lowerCamelCase__ : Optional[int] , lowerCamelCase__ : List[Any] , lowerCamelCase__ : List[Any] ):
    '''simple docstring'''
    # SARI per-n-gram scorer: computes keep / delete / add scores between the
    # source, candidate and reference n-gram multisets.
    # NOTE(review): all four parameters share one mangled name (a SyntaxError)
    # and every local is rebound to the placeholder `lowerCamelCase`, so the
    # names read below (sgramcounter, cgramcounter_rep, keepgramcountergood_rep,
    # keepscore_precision, ...) are unbound as written.  Kept byte-for-byte;
    # restore names from the upstream wiki_split metric before use.
    lowerCamelCase = [rgram for rgrams in rgramslist for rgram in rgrams]
    lowerCamelCase = Counter(lowerCamelCase__ )
    lowerCamelCase = Counter(lowerCamelCase__ )
    lowerCamelCase = Counter()
    for sgram, scount in sgramcounter.items():
        lowerCamelCase = scount * numref
    lowerCamelCase = Counter(lowerCamelCase__ )
    lowerCamelCase = Counter()
    for cgram, ccount in cgramcounter.items():
        lowerCamelCase = ccount * numref
    # KEEP
    lowerCamelCase = sgramcounter_rep & cgramcounter_rep
    lowerCamelCase = keepgramcounter_rep & rgramcounter
    lowerCamelCase = sgramcounter_rep & rgramcounter
    lowerCamelCase = 0
    lowerCamelCase = 0
    for keepgram in keepgramcountergood_rep:
        keeptmpscorea += keepgramcountergood_rep[keepgram] / keepgramcounter_rep[keepgram]
        # Fix an alleged bug [2] in the keep score computation.
        # keeptmpscore2 += keepgramcountergood_rep[keepgram] / keepgramcounterall_rep[keepgram]
        keeptmpscorea += keepgramcountergood_rep[keepgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCamelCase = 1
    lowerCamelCase = 1
    if len(lowerCamelCase__ ) > 0:
        lowerCamelCase = keeptmpscorea / len(lowerCamelCase__ )
    if len(lowerCamelCase__ ) > 0:
        # Fix an alleged bug [2] in the keep score computation.
        # keepscore_recall = keeptmpscore2 / len(keepgramcounterall_rep)
        lowerCamelCase = keeptmpscorea / sum(keepgramcounterall_rep.values() )
    lowerCamelCase = 0
    if keepscore_precision > 0 or keepscore_recall > 0:
        lowerCamelCase = 2 * keepscore_precision * keepscore_recall / (keepscore_precision + keepscore_recall)
    # DELETION
    lowerCamelCase = sgramcounter_rep - cgramcounter_rep
    lowerCamelCase = delgramcounter_rep - rgramcounter
    lowerCamelCase = sgramcounter_rep - rgramcounter
    lowerCamelCase = 0
    lowerCamelCase = 0
    for delgram in delgramcountergood_rep:
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounter_rep[delgram]
        deltmpscorea += delgramcountergood_rep[delgram] / delgramcounterall_rep[delgram]
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCamelCase = 1
    if len(lowerCamelCase__ ) > 0:
        lowerCamelCase = deltmpscorea / len(lowerCamelCase__ )
    # ADDITION
    lowerCamelCase = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
    lowerCamelCase = set(lowerCamelCase__ ) & set(lowerCamelCase__ )
    lowerCamelCase = set(lowerCamelCase__ ) - set(lowerCamelCase__ )
    lowerCamelCase = 0
    for addgram in addgramcountergood:
        addtmpscore += 1
    # Define 0/0=1 instead of 0 to give higher scores for predictions that match
    # a target exactly.
    lowerCamelCase = 1
    lowerCamelCase = 1
    if len(lowerCamelCase__ ) > 0:
        lowerCamelCase = addtmpscore / len(lowerCamelCase__ )
    if len(lowerCamelCase__ ) > 0:
        lowerCamelCase = addtmpscore / len(lowerCamelCase__ )
    lowerCamelCase = 0
    if addscore_precision > 0 or addscore_recall > 0:
        lowerCamelCase = 2 * addscore_precision * addscore_recall / (addscore_precision + addscore_recall)
    return (keepscore, delscore_precision, addscore)
def __lowerCamelCase ( ssent, csent, rsents ):
    """Compute the sentence-level SARI score.

    Args:
        ssent: source sentence (space-tokenized string).
        csent: candidate/simplified sentence (space-tokenized string).
        rsents: list of reference sentences (space-tokenized strings).

    Returns:
        Mean over 1..4-gram orders of the averaged keep/delete/add
        sub-scores, each computed by ``SARIngram``.

    NOTE(review): the mangled source had duplicate parameter names (a
    SyntaxError) and collapsed all n-gram variables into one name; the
    per-order names are restored here from the surviving call structure.
    """
    numref = len(rsents)

    # 1-grams of source and candidate; higher orders are built below.
    s1grams = ssent.split(" ")
    c1grams = csent.split(" ")
    s2grams = []
    c2grams = []
    s3grams = []
    c3grams = []
    s4grams = []
    c4grams = []
    r1gramslist = []
    r2gramslist = []
    r3gramslist = []
    r4gramslist = []

    # Collect 1..4-grams of every reference sentence.
    for rsent in rsents:
        r1grams = rsent.split(" ")
        r2grams = []
        r3grams = []
        r4grams = []
        r1gramslist.append(r1grams)
        for i in range(0, len(r1grams) - 1):
            if i < len(r1grams) - 1:
                r2gram = r1grams[i] + " " + r1grams[i + 1]
                r2grams.append(r2gram)
            if i < len(r1grams) - 2:
                r3gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2]
                r3grams.append(r3gram)
            if i < len(r1grams) - 3:
                r4gram = r1grams[i] + " " + r1grams[i + 1] + " " + r1grams[i + 2] + " " + r1grams[i + 3]
                r4grams.append(r4gram)
        r2gramslist.append(r2grams)
        r3gramslist.append(r3grams)
        r4gramslist.append(r4grams)

    # 2..4-grams of the source sentence.
    for i in range(0, len(s1grams) - 1):
        if i < len(s1grams) - 1:
            s2gram = s1grams[i] + " " + s1grams[i + 1]
            s2grams.append(s2gram)
        if i < len(s1grams) - 2:
            s3gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2]
            s3grams.append(s3gram)
        if i < len(s1grams) - 3:
            s4gram = s1grams[i] + " " + s1grams[i + 1] + " " + s1grams[i + 2] + " " + s1grams[i + 3]
            s4grams.append(s4gram)

    # 2..4-grams of the candidate sentence.
    for i in range(0, len(c1grams) - 1):
        if i < len(c1grams) - 1:
            c2gram = c1grams[i] + " " + c1grams[i + 1]
            c2grams.append(c2gram)
        if i < len(c1grams) - 2:
            c3gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2]
            c3grams.append(c3gram)
        if i < len(c1grams) - 3:
            c4gram = c1grams[i] + " " + c1grams[i + 1] + " " + c1grams[i + 2] + " " + c1grams[i + 3]
            c4grams.append(c4gram)

    (keep1score, del1score, add1score) = SARIngram(s1grams, c1grams, r1gramslist, numref)
    (keep2score, del2score, add2score) = SARIngram(s2grams, c2grams, r2gramslist, numref)
    (keep3score, del3score, add3score) = SARIngram(s3grams, c3grams, r3gramslist, numref)
    (keep4score, del4score, add4score) = SARIngram(s4grams, c4grams, r4gramslist, numref)

    avgkeepscore = sum([keep1score, keep2score, keep3score, keep4score]) / 4
    avgdelscore = sum([del1score, del2score, del3score, del4score]) / 4
    avgaddscore = sum([add1score, add2score, add3score, add4score]) / 4
    finalscore = (avgkeepscore + avgdelscore + avgaddscore) / 3
    return finalscore


# Other code in this file refers to this function by its original name.
SARIsent = __lowerCamelCase
def __lowerCamelCase ( sentence, lowercase: bool = True, tokenizer: str = "13a", return_str: bool = True ):
    """Normalize a sentence for SARI/BLEU computation.

    Args:
        sentence: raw input string.
        lowercase: lowercase the sentence before tokenizing.
        tokenizer: "13a"/"intl" (sacrebleu), "moses"/"penn" (sacremoses),
            anything else leaves the sentence untokenized.
        return_str: return a string; when False, return the token list.

    NOTE(review): restored from a mangled version with duplicate parameter
    names; the sacremoses keyword values (return_str=True, escape=False)
    follow the upstream metric script — confirm.
    """
    if lowercase:
        sentence = sentence.lower()

    if tokenizer in ["13a", "intl"]:
        # sacrebleu >= 2 moved the tokenizer registry into metrics.bleu.
        if version.parse(sacrebleu.__version__).major >= 2:
            normalized_sent = sacrebleu.metrics.bleu._get_tokenizer(tokenizer)()(sentence)
        else:
            normalized_sent = sacrebleu.TOKENIZERS[tokenizer]()(sentence)
    elif tokenizer == "moses":
        normalized_sent = sacremoses.MosesTokenizer().tokenize(sentence, return_str=True, escape=False)
    elif tokenizer == "penn":
        normalized_sent = sacremoses.MosesTokenizer().penn_tokenize(sentence, return_str=True)
    else:
        normalized_sent = sentence

    if not return_str:
        normalized_sent = normalized_sent.split()

    return normalized_sent


# Other code in this file refers to this function by its original name.
normalize = __lowerCamelCase
def __lowerCamelCase ( sources, predictions, references ):
    """Corpus-level SARI: average of SARIsent over all triples, scaled to 0-100.

    Raises:
        ValueError: when the three lists differ in length.
    """
    if not (len(sources) == len(predictions) == len(references)):
        raise ValueError("""Sources length must match predictions and references lengths.""")
    sari_score = 0
    for src, pred, refs in zip(sources, predictions, references):
        # Every sentence (source, prediction and each reference) is normalized
        # the same way before scoring.
        sari_score += SARIsent(normalize(src), normalize(pred), [normalize(sent) for sent in refs])
    sari_score = sari_score / len(predictions)
    return 100 * sari_score


# Other code in this file refers to this function by its original name.
compute_sari = __lowerCamelCase
def __lowerCamelCase ( predictions, references, smooth_method="exp", smooth_value=None, force=False, lowercase=False, use_effective_order=False, ):
    """Corpus BLEU via sacrebleu.

    ``references`` is a list (one entry per prediction) of equal-length
    reference lists; it is transposed into sacrebleu's expected layout.

    Raises:
        ValueError: when predictions have differing reference counts.
    """
    references_per_prediction = len(references[0])
    if any(len(refs) != references_per_prediction for refs in references):
        raise ValueError("""Sacrebleu requires the same number of references for each prediction""")
    # Transpose: sacrebleu wants one list per reference "stream".
    transformed_references = [[refs[i] for refs in references] for i in range(references_per_prediction)]
    output = sacrebleu.corpus_bleu(
        predictions,
        transformed_references,
        smooth_method=smooth_method,
        smooth_value=smooth_value,
        force=force,
        lowercase=lowercase,
        use_effective_order=use_effective_order,
    )
    return output.score


# Other code in this file refers to this function by its original name.
compute_sacrebleu = __lowerCamelCase
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowercase ( datasets.Metric ):
    """Composite text-simplification metric: SARI + sacreBLEU + exact match."""

    def _info(self) -> datasets.MetricInfo:
        """Declare the metric's inputs and reference material for `datasets`.

        NOTE(review): the mangled source named this hook ``__A``;
        ``datasets.Metric`` dispatches to ``_info``, restored here.
        """
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""string""", id="""sequence"""),
                    """references""": datasets.Sequence(datasets.Value("""string""", id="""sequence"""), id="""references"""),
                }
            ),
            codebase_urls=[
                """https://github.com/huggingface/transformers/blob/master/src/transformers/data/metrics/squad_metrics.py""",
                """https://github.com/cocoxu/simplification/blob/master/SARI.py""",
                """https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/utils/sari_hook.py""",
                """https://github.com/mjpost/sacreBLEU""",
            ],
            reference_urls=[
                """https://www.aclweb.org/anthology/Q16-1029.pdf""",
                """https://github.com/mjpost/sacreBLEU""",
                """https://en.wikipedia.org/wiki/BLEU""",
                """https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213""",
            ],
        )

    def _compute(self, sources, predictions, references):
        """Return a dict with corpus-level "sari", "sacrebleu" and "exact" scores.

        The mangled source had duplicate parameter names (a SyntaxError) and
        assigned the result dict to a throwaway name; both are fixed here.
        """
        result = {}
        result.update({"""sari""": compute_sari(sources=sources, predictions=predictions, references=references)})
        result.update({"""sacrebleu""": compute_sacrebleu(predictions=predictions, references=references)})
        result.update({"""exact""": compute_em(predictions=predictions, references=references)})
        return result
| 252 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_torch_available,
)

# Submodule -> public names, consumed by _LazyModule below.  The mangled
# source assigned this dict (and everything after it) to throwaway names,
# so `_import_structure` was referenced but never defined.
_import_structure = {
    "configuration_swiftformer": [
        "SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "SwiftFormerConfig",
        "SwiftFormerOnnxConfig",
    ]
}

# The modeling module is only exported when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_swiftformer"] = [
        "SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "SwiftFormerForImageClassification",
        "SwiftFormerModel",
        "SwiftFormerPreTrainedModel",
    ]

if TYPE_CHECKING:
    # Static type checkers see the real imports.
    from .configuration_swiftformer import (
        SWIFTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
        SwiftFormerConfig,
        SwiftFormerOnnxConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_swiftformer import (
            SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
            SwiftFormerForImageClassification,
            SwiftFormerModel,
            SwiftFormerPreTrainedModel,
        )
else:
    import sys

    # At runtime, replace this module with a lazy proxy so the heavy
    # submodules are only imported on first attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 252 | 1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class a ( SchedulerMixin , ConfigMixin ):
    """Variance-preserving (VP) SDE scheduler (score_sde_pytorch).

    NOTE(review): restored from a mangled copy whose base classes,
    parameter names and method names had been collapsed; names follow
    diffusers' ScoreSdeVpScheduler — confirm against upstream.
    """

    # Solver order used by pipelines when stepping this scheduler.
    order = 1

    @register_to_config
    def __init__(self, num_train_timesteps=2000, beta_min=0.1, beta_max=20, sampling_eps=1e-3):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps(self, num_inference_steps, device: Union[str, torch.device] = None):
        """Create the continuous time grid from 1 down to ``sampling_eps``."""
        self.timesteps = torch.linspace(1, self.config.sampling_eps, num_inference_steps, device=device)

    def step_pred(self, score, x, t, generator=None):
        """Run one reverse-SDE predictor step; returns ``(x, x_mean)``."""
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler"
            )

        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score: rescale by the marginal std of the VP SDE.
        log_mean_coeff = (
            -0.25 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff))
        std = std.flatten()
        while len(std.shape) < len(score.shape):
            std = std.unsqueeze(-1)
        score = -score / std

        # compute drift and diffusion of the reverse SDE.
        dt = -1.0 / len(self.timesteps)
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape) < len(x.shape):
            beta_t = beta_t.unsqueeze(-1)
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t)
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt

        # add noise
        noise = randn_tensor(x.shape, layout=x.layout, generator=generator, device=x.device, dtype=x.dtype)
        x = x_mean + diffusion * math.sqrt(-dt) * noise
        return x, x_mean

    def __len__(self):
        return self.config.num_train_timesteps
| 359 |
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column)."""
    # Row and column check.
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    # 3x3 box check.
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return coordinates of the first empty (0) cell, or None when full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 177 | 0 |
def _a ( denominations , total_value ):
    """Greedy change-making: return the coins used to pay ``total_value``.

    Args:
        denominations: coin values sorted ascending.
        total_value: amount to change (int or numeric string).

    Returns:
        List of coins used, largest first.  (The mangled original had
        duplicate parameter names — a SyntaxError — and never bound
        ``total_value``/``answer``.)
    """
    total_value = int(total_value)
    # Initialize Result
    answer = []
    # Traverse through all denominations, largest first.
    for denomination in reversed(denominations):
        # Take as many of this coin as still fits.
        while total_value >= int(denomination):
            total_value -= int(denomination)
            answer.append(denomination)  # Append the "answers" array
    return answer


# Name the driver code below refers to.
find_minimum_change = _a
# Driver Code
if __name__ == "__main__":
    denominations = []
    value = "0"

    if (
        input("Do you want to enter your denominations ? (yY/n): ").strip().lower()
        == "y"
    ):
        n = int(input("Enter the number of denominations you want to add: ").strip())
        for i in range(0, n):
            denominations.append(int(input(f"Denomination {i}: ").strip()))
        value = input("Enter the change you want to make in Indian Currency: ").strip()
    else:
        # All denominations of Indian Currency if user does not enter
        denominations = [1, 2, 5, 10, 20, 50, 100, 500, 2000]
        value = input("Enter the change you want to make: ").strip()

    if int(value) == 0 or int(value) < 0:
        print("The total value cannot be zero or negative.")
    else:
        print(f"Following is minimal change for {value}: ")
        # ``_a`` is the change-making helper defined above (the mangled
        # original called an undefined name here).
        answer = _a(denominations, value)
        # Print result
        for i in range(len(answer)):
            print(answer[i], end=" ")
| 110 |
import argparse
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoTokenizer, RobertaPreLayerNormConfig, RobertaPreLayerNormForMaskedLM
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase = logging.get_logger(__name__)
def _a ( checkpoint_repo , pytorch_dump_folder_path ):
    """Convert a RoBERTa-PreLayerNorm research checkpoint to transformers format.

    Downloads ``pytorch_model.bin`` from the Hub repo ``checkpoint_repo``,
    renames/cleans its state dict, and saves the converted model and
    tokenizer into ``pytorch_dump_folder_path``.
    """
    config = RobertaPreLayerNormConfig.from_pretrained(
        checkpoint_repo, architectures=["RobertaPreLayerNormForMaskedLM"]
    )

    # convert state_dict
    original_state_dict = torch.load(hf_hub_download(repo_id=checkpoint_repo, filename="pytorch_model.bin"))
    state_dict = {}
    for tensor_key, tensor_value in original_state_dict.items():
        # The transformer implementation gives the model a unique name, rather than overwiriting 'roberta'
        if tensor_key.startswith("roberta."):
            tensor_key = "roberta_prelayernorm." + tensor_key[len("roberta.") :]

        # The original implementation contains weights which are not used, remove them from the state_dict
        if tensor_key.endswith(".self.LayerNorm.weight") or tensor_key.endswith(".self.LayerNorm.bias"):
            continue

        state_dict[tensor_key] = tensor_value

    # NOTE(review): the mangled source collapsed the first argument; upstream
    # passes ``pretrained_model_name_or_path=None`` so weights come only from
    # ``state_dict`` — confirm.
    model = RobertaPreLayerNormForMaskedLM.from_pretrained(
        pretrained_model_name_or_path=None, config=config, state_dict=state_dict
    )
    model.save_pretrained(pytorch_dump_folder_path)

    # convert tokenizer
    tokenizer = AutoTokenizer.from_pretrained(checkpoint_repo)
    tokenizer.save_pretrained(pytorch_dump_folder_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint-repo",
        default=None,
        type=str,
        required=True,
        help="Path the official PyTorch dump, e.g. 'andreasmadsen/efficient_mlm_m0.40'.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    # The conversion function above was renamed to ``_a`` by the mangling.
    _a(args.checkpoint_repo, args.pytorch_dump_folder_path)
| 110 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_fnet import FNetTokenizer
else:
_a : List[Any] = None
_a : Union[str, Any] = logging.get_logger(__name__)
_a : Any = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
_a : Tuple = {
'''vocab_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/spiece.model''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''google/fnet-base''': '''https://huggingface.co/google/fnet-base/resolve/main/tokenizer.json''',
'''google/fnet-large''': '''https://huggingface.co/google/fnet-large/resolve/main/tokenizer.json''',
},
}
_a : Tuple = {
'''google/fnet-base''': 512,
'''google/fnet-large''': 512,
}
_a : Optional[Any] = '''▁'''
class _UpperCAmelCase( PreTrainedTokenizerFast ):
    """Fast FNet tokenizer backed by a SentencePiece-derived ``tokenizer.json``.

    NOTE(review): restored from a mangled copy in which the base class, the
    class attributes and all keyword names were collapsed; names follow the
    upstream FNet fast tokenizer — confirm against transformers.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "token_type_ids"]
    slow_tokenizer_class = FNetTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=False,
        remove_space=True,
        keep_accents=True,
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # A slow tokenizer can only be re-created when the spiece model exists.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """``[CLS] A [SEP]`` or ``[CLS] A [SEP] B [SEP]``."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        """0s for the first segment (incl. specials), 1s for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        """Copy the SentencePiece model into ``save_directory``."""
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )

        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
| 364 |
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
| 100 | 0 |
"""Backtracking Sudoku solver (restored from a mangled copy in which all
four functions shared one name and had duplicate parameter names)."""
from __future__ import annotations

Matrix = list[list[int]]

# assigning initial values to the grid
initial_grid: Matrix = [
    [3, 0, 6, 5, 0, 8, 4, 0, 0],
    [5, 2, 0, 0, 0, 0, 0, 0, 0],
    [0, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]

# a grid with no solution
no_solution: Matrix = [
    [5, 0, 6, 5, 0, 8, 4, 0, 3],
    [5, 2, 0, 0, 0, 0, 0, 0, 2],
    [1, 8, 7, 0, 0, 0, 0, 3, 1],
    [0, 0, 3, 0, 1, 0, 0, 8, 0],
    [9, 0, 0, 8, 6, 3, 0, 0, 5],
    [0, 5, 0, 0, 9, 0, 6, 0, 0],
    [1, 3, 0, 0, 0, 0, 2, 5, 0],
    [0, 0, 0, 0, 0, 0, 0, 7, 4],
    [0, 0, 5, 2, 0, 6, 3, 0, 0],
]


def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` may be placed at (row, column)."""
    for i in range(9):
        if grid[row][i] == n or grid[i][column] == n:
            return False
    for i in range(3):
        for j in range(3):
            if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
                return False
    return True


def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return coordinates of the first empty (0) cell, or None when full."""
    for i in range(9):
        for j in range(9):
            if grid[i][j] == 0:
                return i, j
    return None


def sudoku(grid: Matrix) -> Matrix | None:
    """Solve ``grid`` in place by backtracking; return it, or None if unsolvable."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0  # backtrack

    return None


def print_solution(grid: Matrix) -> None:
    """Print the grid one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
        print()


if __name__ == "__main__":
    # make a copy of grid so that you can compare with the unmodified grid
    for example_grid in (initial_grid, no_solution):
        print("\nExample grid:\n" + "=" * 20)
        print_solution(example_grid)
        print("\nExample grid solution:")
        solution = sudoku(example_grid)
        if solution is not None:
            print_solution(solution)
        else:
            print("Cannot find a solution.")
| 55 | """simple docstring"""
from collections import Counter
from pathlib import Path
from typing import Optional, Tuple
import yaml
class UpperCamelCase ( yaml.SafeLoader ):
    """A ``yaml.SafeLoader`` that rejects mappings with duplicate keys."""

    def _check_no_duplicates_on_constructed_node(self, node):
        """Raise TypeError when two keys of ``node`` construct to equal values."""
        keys = [self.constructed_objects[key_node] for key_node, _ in node.value]
        # Lists are unhashable; compare them as tuples.
        keys = [tuple(key) if isinstance(key, list) else key for key in keys]
        counter = Counter(keys)
        duplicate_keys = [key for key in counter if counter[key] > 1]
        if duplicate_keys:
            raise TypeError(f"Got duplicate yaml keys: {duplicate_keys}")

    def construct_mapping(self, node, deep=False):
        mapping = super().construct_mapping(node, deep=deep)
        self._check_no_duplicates_on_constructed_node(node)
        return mapping


# Callers in this file reference the loader by its original name.
_NoDuplicateSafeLoader = UpperCamelCase
def lowercase__( readme_content: str ):
    """Split README text into ``(yaml_block, rest)`` around the leading ``---`` fence.

    Returns ``(None, full_text)`` when there is no YAML front-matter block.
    """
    full_content = list(readme_content.splitlines())
    if full_content and full_content[0] == "---" and "---" in full_content[1:]:
        # Index of the closing fence, relative to the whole line list.
        sep_idx = full_content[1:].index("---") + 1
        yamlblock = "\n".join(full_content[1:sep_idx])
        return yamlblock, "\n".join(full_content[sep_idx + 1 :])
    return None, "\n".join(full_content)


# Callers in this file reference this helper by its original name.
_split_yaml_from_readme = lowercase__
class UpperCamelCase ( dict ):
    """Dataset README metadata held as a dict, (de)serialized via a YAML block.

    NOTE(review): restored from a mangled copy — the base class, the
    ``_FIELDS_WITH_DASHES`` attribute and every method name had been
    collapsed; names follow the surviving call sites in this file.
    """

    # class attributes
    _FIELDS_WITH_DASHES = {"train_eval_index"}  # train-eval-index in the YAML metadata

    @classmethod
    def from_readme(cls, path) -> "UpperCamelCase":
        """Load metadata from the YAML block of the README at ``path``."""
        with open(path, encoding="utf-8") as readme_file:
            yaml_string, _ = _split_yaml_from_readme(readme_file.read())
        if yaml_string is not None:
            return cls.from_yaml_string(yaml_string)
        else:
            return cls()

    def to_readme(self, path):
        """Write (or replace) the YAML metadata block of the README at ``path``."""
        if path.exists():
            with open(path, encoding="utf-8") as readme_file:
                readme_content = readme_file.read()
        else:
            readme_content = None
        full_content = self._to_readme(readme_content)
        with open(path, "w", encoding="utf-8") as readme_file:
            readme_file.write(full_content)

    def _to_readme(self, readme_content=None) -> str:
        """Return README text with this metadata as the leading YAML block."""
        if readme_content is not None:
            _, content = _split_yaml_from_readme(readme_content)
            full_content = "---\n" + self.to_yaml_string() + "---\n" + content
        else:
            full_content = "---\n" + self.to_yaml_string() + "---\n"
        return full_content

    @classmethod
    def from_yaml_string(cls, string) -> "UpperCamelCase":
        """Parse a YAML string, converting dashed keys to underscored fields."""
        metadata_dict = yaml.load(string, Loader=_NoDuplicateSafeLoader) or {}
        # Convert the YAML keys to DatasetMetadata fields
        metadata_dict = {
            (key.replace("-", "_") if key.replace("-", "_") in cls._FIELDS_WITH_DASHES else key): value
            for key, value in metadata_dict.items()
        }
        return cls(**metadata_dict)

    def to_yaml_string(self) -> str:
        """Serialize to YAML, converting underscored fields back to dashed keys."""
        return yaml.safe_dump(
            {
                (key.replace("_", "-") if key in self._FIELDS_WITH_DASHES else key): value
                for key, value in self.items()
            },
            sort_keys=False,
            allow_unicode=True,
            encoding="utf-8",
        ).decode("utf-8")
__SCREAMING_SNAKE_CASE ={
"image-classification": [],
"translation": [],
"image-segmentation": [],
"fill-mask": [],
"automatic-speech-recognition": [],
"token-classification": [],
"sentence-similarity": [],
"audio-classification": [],
"question-answering": [],
"summarization": [],
"zero-shot-classification": [],
"table-to-text": [],
"feature-extraction": [],
"other": [],
"multiple-choice": [],
"text-classification": [],
"text-to-image": [],
"text2text-generation": [],
"zero-shot-image-classification": [],
"tabular-classification": [],
"tabular-regression": [],
"image-to-image": [],
"tabular-to-text": [],
"unconditional-image-generation": [],
"text-retrieval": [],
"text-to-speech": [],
"object-detection": [],
"audio-to-audio": [],
"text-generation": [],
"conversational": [],
"table-question-answering": [],
"visual-question-answering": [],
"image-to-text": [],
"reinforcement-learning": [],
"voice-activity-detection": [],
"time-series-forecasting": [],
"document-question-answering": [],
}
if __name__ == "__main__":
from argparse import ArgumentParser
__SCREAMING_SNAKE_CASE =ArgumentParser(usage="Validate the yaml metadata block of a README.md file.")
ap.add_argument("readme_filepath")
__SCREAMING_SNAKE_CASE =ap.parse_args()
__SCREAMING_SNAKE_CASE =Path(args.readme_filepath)
__SCREAMING_SNAKE_CASE =DatasetMetadata.from_readme(readme_filepath)
print(dataset_metadata)
dataset_metadata.to_readme(readme_filepath)
| 213 | 0 |
from __future__ import annotations
import os
from collections.abc import Mapping
_SCREAMING_SNAKE_CASE : Any = tuple[int, int]
class UpperCAmelCase__ :
    """Undirected weighted graph with Prim's minimum-spanning-tree algorithm."""

    def __init__(self, vertices, edges):
        """``vertices``: set of vertex ids; ``edges``: mapping (u, v) -> weight."""
        self.vertices = vertices
        # Normalize every edge key to (min, max) so lookups ignore orientation.
        self.edges = {
            (min(edge), max(edge)): weight for edge, weight in edges.items()
        }

    def add_edge(self, edge, weight):
        """Add (or overwrite) an edge, creating its endpoints as needed."""
        self.vertices.add(edge[0])
        self.vertices.add(edge[1])
        self.edges[(min(edge), max(edge))] = weight

    def prims_algorithm(self):
        """Return a minimum spanning tree of this graph (Prim's algorithm)."""
        # Grow the tree from an arbitrary vertex (the smallest id).
        subgraph = UpperCAmelCase__({min(self.vertices)}, {})

        while len(subgraph.vertices) < len(self.vertices):
            # Sentinel strictly greater than every edge weight.
            min_weight = max(self.edges.values()) + 1
            min_edge = None
            for edge, weight in self.edges.items():
                # Only edges crossing the cut (exactly one endpoint inside).
                if (edge[0] in subgraph.vertices) ^ (edge[1] in subgraph.vertices):
                    if weight < min_weight:
                        min_edge = edge
                        min_weight = weight
            subgraph.add_edge(min_edge, min_weight)
        return subgraph


# Callers in this file use the class's original name.
Graph = UpperCAmelCase__
def UpperCAmelCase_ ( _A = "p107_network.txt" ):
'''simple docstring'''
SCREAMING_SNAKE_CASE__ = os.path.abspath(os.path.dirname(_A ) )
SCREAMING_SNAKE_CASE__ = os.path.join(_A , _A )
SCREAMING_SNAKE_CASE__ = {}
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
SCREAMING_SNAKE_CASE__ = 42
with open(_A ) as f:
SCREAMING_SNAKE_CASE__ = f.read().strip().split('''\n''' )
SCREAMING_SNAKE_CASE__ = [line.split(''',''' ) for line in data]
for edgea in range(1 , len(_A ) ):
for edgea in range(_A ):
if adjaceny_matrix[edgea][edgea] != "-":
SCREAMING_SNAKE_CASE__ = int(adjaceny_matrix[edgea][edgea] )
SCREAMING_SNAKE_CASE__ = Graph(set(range(len(_A ) ) ) , _A )
SCREAMING_SNAKE_CASE__ = graph.prims_algorithm()
SCREAMING_SNAKE_CASE__ = sum(graph.edges.values() )
SCREAMING_SNAKE_CASE__ = sum(subgraph.edges.values() )
return initial_total - optimal_total
if __name__ == "__main__":
print(F"{solution() = }")
| 218 |
from functools import reduce

# Project Euler problem 8: the 1000-digit number, as one string.  The mangled
# source assigned it to a throwaway name, leaving ``N`` (used as the default
# argument below) undefined.
N = (
    "73167176531330624919225119674426574742355349194934"
    "96983520312774506326239578318016984801869478851843"
    "85861560789112949495459501737958331952853208805511"
    "12540698747158523863050715693290963295227443043557"
    "66896648950445244523161731856403098711121722383113"
    "62229893423380308135336276614282806444486645238749"
    "30358907296290491560440772390713810515859307960866"
    "70172427121883998797908792274921901699720888093776"
    "65727333001053367881220235421809751254540594752243"
    "52584907711670556013604839586446706324415722155397"
    "53697817977846174064955149290862569321978468622482"
    "83972241375657056057490261407972968652414535100474"
    "82166370484403199890008895243450658541227588666881"
    "16427171479924442928230863465674813919123162824586"
    "17866458359124566529476545682848912883142607690042"
    "24219022671055626321111109370544217506941658960408"
    "07198403850962455444362981230987879927244284909188"
    "84580156166097919133875499200524063689912560717606"
    "05886116467109405077541002256983155200055935729725"
    "71636269561882670428252483600823257530420752963450"
)


def UpperCAmelCase_ ( _A = N ):
    """Return the greatest product of 13 adjacent digits of the digit string ``_A``.

    The mangled lambda had two parameters with the same name (a SyntaxError);
    it is restored to a running string-product over each 13-digit window.
    """
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), _A[i : i + 13]))
        for i in range(len(_A) - 12)
    )


# Name used by the __main__ block below.
solution = UpperCAmelCase_

if __name__ == "__main__":
    print(f"{solution() = }")
| 218 | 1 |
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union

import numpy as np
import PIL
from PIL import Image

from ...utils import BaseOutput, is_torch_available, is_transformers_available


@dataclass
class _a ( BaseOutput ):
    """Output of the Semantic Stable Diffusion pipeline.

    NOTE(review): the mangled source collapsed both fields to placeholder
    ``42`` values and the base class to an undefined name; fields restored
    from the upstream diffusers definition — confirm.
    """

    # Generated images, as a list of PIL images or a numpy array.
    images: Union[List[PIL.Image.Image], np.ndarray]
    # Per-image NSFW flags, or None when the safety checker is disabled.
    nsfw_content_detected: Optional[List[bool]]


if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 147 |
a : float = 9.80665
# ``g`` is standard gravity (m/s^2); the mangled source renamed the constant
# but the default argument below still refers to it.
g = a


def lowerCAmelCase_ (fluid_density: float, volume: float, gravity: float = g):
    """Archimedes' principle: buoyant force = fluid_density * gravity * volume.

    Raises:
        ValueError: for non-positive density/gravity or negative volume.
    """
    if fluid_density <= 0:
        raise ValueError("""Impossible fluid density""")
    if volume < 0:
        raise ValueError("""Impossible Object volume""")
    if gravity <= 0:
        raise ValueError("""Impossible Gravity""")
    return fluid_density * gravity * volume


if __name__ == "__main__":
    import doctest

    # run doctest
    doctest.testmod()
| 147 | 1 |
'''simple docstring'''
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
a : Optional[int] = logging.getLogger(__name__)
a : Dict = {"""facebook/bart-base""": BartForConditionalGeneration}
a : Tuple = {"""facebook/bart-base""": BartTokenizer}
def parse_args():
    """Parse the command-line options for the BART-to-ONNX exporter."""
    parser = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph.")
    parser.add_argument(
        "--validation_file", type=str, default=None, help="A csv or a json file containing the validation data."
    )
    parser.add_argument(
        "--max_length",
        type=int,
        default=5,
        help="The maximum total input sequence length after tokenization.",
    )
    parser.add_argument(
        "--num_beams",
        type=int,
        default=None,
        help=(
            "Number of beams to use for evaluation. This argument will be "
            "passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
        ),
    )
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=True,
    )
    parser.add_argument(
        "--config_name",
        type=str,
        default=None,
        help="Pretrained config name or path if not the same as model_name",
    )
    parser.add_argument(
        "--device",
        type=str,
        default="cpu",
        help="Device where the model will be run",
    )
    parser.add_argument("--output_file_path", type=str, default=None, help="Where to store the final ONNX file.")

    args = parser.parse_args()
    return args


def load_model_tokenizer(model_name, device="cpu"):
    """Load the model and tokenizer for `model_name` and move the model to `device`.

    Returns:
        (model, tokenizer) tuple.
    """
    huggingface_model = model_dict[model_name].from_pretrained(model_name).to(device)
    tokenizer = tokenizer_dict[model_name].from_pretrained(model_name)

    if model_name in ["facebook/bart-base"]:
        # Disable generation features the scripted beam search does not support.
        # NOTE(review): the original assigned three bare placeholders here; the
        # config attributes below follow the values (0 / None / 0) in order.
        huggingface_model.config.no_repeat_ngram_size = 0
        huggingface_model.config.forced_bos_token_id = None
        huggingface_model.config.min_length = 0

    return huggingface_model, tokenizer


def export_and_validate_model(model, tokenizer, onnx_file_path, num_beams, max_length):
    """Export `model` (with beam search) to ONNX and check parity with PyTorch.

    Runs `model.generate` in PyTorch, exports the scripted beam-search wrapper
    to `onnx_file_path`, deduplicates initializers, then replays the same
    inputs through ONNX Runtime and asserts the outputs match within 1e-3.
    """
    model.eval()

    ort_sess = None
    bart_script_model = torch.jit.script(BARTBeamSearchGenerator(model))

    with torch.no_grad():
        ARTICLE_TO_SUMMARIZE = "My friends are cool but they eat too many carbs."
        inputs = tokenizer([ARTICLE_TO_SUMMARIZE], max_length=1024, return_tensors="pt").to(model.device)

        summary_ids = model.generate(
            inputs["input_ids"],
            attention_mask=inputs["attention_mask"],
            num_beams=num_beams,
            max_length=max_length,
            early_stopping=True,
            decoder_start_token_id=model.config.decoder_start_token_id,
        )

        torch.onnx.export(
            bart_script_model,
            (
                inputs["input_ids"],
                inputs["attention_mask"],
                num_beams,
                max_length,
                model.config.decoder_start_token_id,
            ),
            onnx_file_path,
            opset_version=14,
            input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"],
            output_names=["output_ids"],
            dynamic_axes={
                "input_ids": {0: "batch", 1: "seq"},
                "output_ids": {0: "batch", 1: "seq_out"},
            },
            example_outputs=summary_ids,
        )

        logger.info("Model exported to {}".format(onnx_file_path))

        new_onnx_file_path = remove_dup_initializers(os.path.abspath(onnx_file_path))

        logger.info("Deduplicated and optimized model written to {}".format(new_onnx_file_path))

        ort_sess = onnxruntime.InferenceSession(new_onnx_file_path)
        ort_out = ort_sess.run(
            None,
            {
                "input_ids": inputs["input_ids"].cpu().numpy(),
                "attention_mask": inputs["attention_mask"].cpu().numpy(),
                "num_beams": np.array(num_beams),
                "max_length": np.array(max_length),
                "decoder_start_token_id": np.array(model.config.decoder_start_token_id),
            },
        )

        np.testing.assert_allclose(summary_ids.cpu().numpy(), ort_out[0], rtol=1e-3, atol=1e-3)

        logger.info("Model outputs from torch and ONNX Runtime are similar.")
        logger.info("Success.")


def main():
    """Entry point: parse args, load the model, export and validate the ONNX graph."""
    args = parse_args()
    max_length = 5
    num_beams = 4

    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )

    logger.setLevel(logging.INFO)
    transformers.utils.logging.set_verbosity_error()

    device = torch.device(args.device)

    model, tokenizer = load_model_tokenizer(args.model_name_or_path, device)
    if model.config.decoder_start_token_id is None:
        raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined")

    model.to(device)

    if args.max_length:
        max_length = args.max_length

    if args.num_beams:
        num_beams = args.num_beams

    if args.output_file_path:
        output_name = args.output_file_path
    else:
        output_name = "BART.onnx"

    logger.info("Exporting model to ONNX")
    export_and_validate_model(model, tokenizer, output_name, num_beams, max_length)


if __name__ == "__main__":
    main()
| 357 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
precision = 10


def lin_search(left: int, right: int, array: list[int], target: int) -> int:
    """Linear scan of array[left:right]; returns the index of target or -1."""
    for i in range(left, right):
        if array[i] == target:
            return i
    return -1


def ite_ternary_search(array: list[int], target: int) -> int:
    """Iterative ternary search over a sorted `array`; returns index or -1.

    Falls back to `lin_search` once the remaining range is narrower than
    `precision`.
    """
    left = 0
    right = len(array)
    while left <= right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        # NOTE(review): these split points are taken over (left + right), not
        # the range width — behavior kept from the original; verify on very
        # large inputs where left is far from 0.
        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            right = one_third - 1
        elif array[two_third] < target:
            left = two_third + 1
        else:
            left = one_third + 1
            right = two_third - 1
    else:
        return -1


def rec_ternary_search(left: int, right: int, array: list[int], target: int) -> int:
    """Recursive ternary search of `array[left..right]`; returns index or -1."""
    if left < right:
        if right - left < precision:
            return lin_search(left, right, array, target)

        one_third = (left + right) // 3 + 1
        two_third = 2 * (left + right) // 3 + 1

        if array[one_third] == target:
            return one_third
        elif array[two_third] == target:
            return two_third
        elif target < array[one_third]:
            return rec_ternary_search(left, one_third - 1, array, target)
        elif array[two_third] < target:
            return rec_ternary_search(two_third + 1, right, array, target)
        else:
            return rec_ternary_search(one_third + 1, two_third - 1, array, target)
    else:
        return -1


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = [int(item.strip()) for item in user_input.split(",")]
    assert collection == sorted(collection), f"List must be ordered.\n{collection}."
    target = int(input("Enter the number to be found in the list:\n").strip())
    result1 = ite_ternary_search(collection, target)
    result2 = rec_ternary_search(0, len(collection) - 1, collection, target)
    if result1 != -1:
        print(f"Iterative search: {target} found at positions: {result1}")
        print(f"Recursive search: {target} found at positions: {result2}")
    else:
        print("Not found")
| 338 | 0 |
'''simple docstring'''
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Builds a tiny ViT config plus random inputs and runs the shared shape checks."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a single forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ViTConfig from the tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shapes for RGB and greyscale inputs."""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check logits shapes for RGB (with labels) and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for ViT; relies on ViTModelTester for configs and inputs."""

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four flags below restore the (True, False, False, False)
    # values of the original placeholders, in order.
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against real pretrained ViT checkpoints."""

    @cached_property
    def default_image_processor(self):
        # Only available when the `vision` extras are installed.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # Run the model at a higher resolution (480) than it was trained on,
        # relying on position-embedding interpolation.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """Make sure inference runs in half precision without errors."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 223 |
'''simple docstring'''

from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


# Public API of the NLLB-MoE subpackage, exported lazily via _LazyModule below.
_import_structure = {
    "configuration_nllb_moe": [
        "NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "NllbMoeConfig",
    ]
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_nllb_moe"] = [
        "NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "NllbMoeForConditionalGeneration",
        "NllbMoeModel",
        "NllbMoePreTrainedModel",
        "NllbMoeTop2Router",
        "NllbMoeSparseMLP",
    ]

if TYPE_CHECKING:
    from .configuration_nllb_moe import (
        NLLB_MOE_PRETRAINED_CONFIG_ARCHIVE_MAP,
        NllbMoeConfig,
    )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        # Name fixed: the lazy-export list above registers "NllbMoeTop2Router",
        # so the eager import must match it exactly.
        from .modeling_nllb_moe import (
            NLLB_MOE_PRETRAINED_MODEL_ARCHIVE_LIST,
            NllbMoeForConditionalGeneration,
            NllbMoeModel,
            NllbMoePreTrainedModel,
            NllbMoeSparseMLP,
            NllbMoeTop2Router,
        )

else:
    import sys

    # Replace this module with a lazy proxy so heavy deps load on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 223 | 1 |
"""simple docstring"""
from __future__ import annotations
import numpy as np
def relu(vector) -> np.ndarray:
    """Apply the ReLU activation element-wise: max(0, x).

    Args:
        vector: Array-like of numbers.

    Returns:
        numpy array with negative entries clamped to 0.
    """
    return np.maximum(0, vector)


if __name__ == "__main__":
    print(np.array(relu([-1, 0, 5])))  # --> [0, 0, 5]
| 310 |
"""simple docstring"""
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTForImageClassification, ViTForMaskedImageModeling, ViTModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class ViTModelTester:
    """Creates a small ViT config and random inputs; used by ViTModelTest."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels) for a single forward pass."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a ViTConfig from the tester's hyper-parameters."""
        return ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape."""
        model = ViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check reconstruction shapes for RGB and greyscale inputs."""
        model = ViTForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = ViTForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check logits shapes for RGB (with labels) and greyscale inputs."""
        config.num_labels = self.type_sequence_label_size
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = ViTForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the format the common tests expect."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            pixel_values,
            labels,
        ) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class ViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Model tests for ViT; relies on ViTModelTester for configs and inputs."""

    all_model_classes = (
        (
            ViTModel,
            ViTForImageClassification,
            ViTForMaskedImageModeling,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ViTModel, "image-classification": ViTForImageClassification}
        if is_torch_available()
        else {}
    )
    # NOTE(review): the four flags below restore the (True, False, False, False)
    # values of the original placeholders, in order.
    fx_compatible = True
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = ViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="ViT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ViTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Load the COCO fixture image used by the integration tests below."""
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class ViTModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against real pretrained ViT checkpoints."""

    @cached_property
    def default_image_processor(self):
        # Only available when the `vision` extras are installed.
        return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ViTForImageClassification.from_pretrained("google/vit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([-0.2744, 0.8215, -0.0836]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_interpolate_pos_encoding(self):
        # ViT models have an `interpolate_pos_encoding` argument in their forward method,
        # allowing to interpolate the pre-trained position embeddings in order to use
        # the model on higher resolutions. The DINO model by Facebook AI leverages this
        # to visualize self-attention on higher resolution images.
        model = ViTModel.from_pretrained("facebook/dino-vits8").to(torch_device)

        image_processor = ViTImageProcessor.from_pretrained("facebook/dino-vits8", size=480)
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values, interpolate_pos_encoding=True)

        # verify the logits
        expected_shape = torch.Size((1, 3601, 384))
        self.assertEqual(outputs.last_hidden_state.shape, expected_shape)

        expected_slice = torch.tensor(
            [[4.2340, 4.3906, -6.6692], [4.5463, 1.8928, -6.7257], [4.4429, 0.8496, -5.8585]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.last_hidden_state[0, :3, :3], expected_slice, atol=1e-4))

    @slow
    @require_accelerate
    @require_torch_gpu
    def test_inference_fp16(self):
        """Make sure inference runs in half precision without errors."""
        model = ViTModel.from_pretrained("facebook/dino-vits8", torch_dtype=torch.float16, device_map="auto")
        image_processor = self.default_image_processor

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt")
        pixel_values = inputs.pixel_values.to(torch_device)

        # forward pass to make sure inference works in fp16
        with torch.no_grad():
            _ = model(pixel_values)
| 310 | 1 |
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Mapping model type -> feature-extractor class name; consumed by
# `feature_extractor_class_from_name` and by the lazy mapping below.
# (The obfuscated file bound all three globals to `a_`, leaving the names
# actually referenced later in this module undefined.)
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazily resolves a config class to its feature-extractor class.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name: str):
    """Resolve a feature-extractor *class name* (e.g. ``"ViTFeatureExtractor"``)
    to the actual class, or return ``None`` if it cannot be found.

    Lookup order: the per-model modules listed in
    ``FEATURE_EXTRACTOR_MAPPING_NAMES``, then extractors registered at runtime,
    then the top-level ``transformers`` namespace (dummy objects for missing deps).
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            # NOTE(review): the obfuscated body passed the class name here, but the
            # loop variable (the model type) is what must be mapped to a module name.
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # The class may live in another model module (shared extractors).
                continue

    # Extractors registered dynamically via AutoFeatureExtractor.register().
    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature-extractor configuration (``preprocessor_config.json``)
    from a local folder or the Hub and return it as a dict.

    Returns an empty dict when the file cannot be located, so callers can fall
    back to the model config. Extra ``kwargs`` are accepted for API symmetry
    and ignored here (as in the original).
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    """Factory that instantiates the correct feature-extractor class from a
    pretrained checkpoint. Not meant to be instantiated directly — use
    :meth:`from_pretrained`.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate a feature extractor from a checkpoint name/path.

        Resolution order: explicit `feature_extractor_type` in the extractor
        config, a remote-code `auto_map` entry (if trusted), the model config's
        `feature_extractor_type`, and finally the config-class based
        FEATURE_EXTRACTOR_MAPPING. Raises ValueError if nothing matches.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        # Mark the call as coming from an Auto* class (reconstructed LHS —
        # NOTE(review): the obfuscated line lost its target; confirm key name).
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys() )}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature-extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class TestDatasetScripts(TestCase):
    """Static lint checks over `./datasets/**/*.py`: `open(...)` must pass an
    explicit mode/encoding, and bare `print(...)` calls are forbidden."""

    def _no_encoding_on_file_open(self, filepath: str):
        r"""Return a regex match if *filepath* contains an `open(...)` call with
        no mode/`encoding` keyword on the same line, else ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)")
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath: str):
        r"""Return the first *real* `print(` call in *filepath* (occurrences in
        comments, strings and triple-quoted docstrings are ignored), or ``None``."""
        with open(filepath, encoding="utf-8") as input_file:
            regexp = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()", re.DOTALL)
            input_text = input_file.read()
        # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
        matches = regexp.finditer(input_text)
        relevant_matches = [match for match in matches if match is not None and match.group(1) is not None]
        return relevant_matches[0] if relevant_matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(f"open(...) must use utf-8 encoding in {dataset}")

    def test_no_print_statements(self):
        dataset_paths = Path("./datasets")
        dataset_files = list(dataset_paths.absolute().glob("**/*.py"))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(f"print statement found in {dataset}. Use datasets.logger/logging instead.")
| 201 | 0 |
from dataclasses import dataclass, field
from typing import TYPE_CHECKING, Any, ClassVar, Dict, List, Optional, Union
import pyarrow as pa
if TYPE_CHECKING:
from .features import FeatureType
@dataclass
class Translation:
    """Feature for translations with a fixed set of languages per example.

    Each example maps every language code in ``languages`` to its string
    translation. NOTE(review): field names reconstructed (the obfuscated source
    collapsed them all to ``__snake_case`` and used an undefined ``lowercase``
    for ``init``/``repr``) — confirm against upstream `datasets`.
    """

    languages: List[str]
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="Translation", init=False, repr=False)

    def __call__(self):
        # Arrow storage: one string field per (sorted) language.
        return pa.struct({lang: pa.string() for lang in sorted(self.languages)})

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten the feature into a {language: Value("string")} dictionary."""
        from .features import Value

        return {k: Value("string") for k in sorted(self.languages)}
@dataclass
class TranslationVariableLanguages:
    """Feature for translations with a variable per-example language set.

    ``encode_example`` accepts a dict mapping language code to a string or a
    list of strings, and flattens it into parallel ``language``/``translation``
    tuples sorted by language code. NOTE(review): field/method names
    reconstructed from use sites — confirm against upstream `datasets`.
    """

    languages: Optional[List] = None
    num_languages: Optional[int] = None
    id: Optional[str] = None
    # Automatically constructed
    dtype: ClassVar[str] = "dict"
    pa_type: ClassVar[Any] = None
    _type: str = field(default="TranslationVariableLanguages", init=False, repr=False)

    def __post_init__(self):
        # Normalize to a sorted, de-duplicated language list (or None).
        self.languages = sorted(set(self.languages)) if self.languages else None
        self.num_languages = len(self.languages) if self.languages else None

    def __call__(self):
        return pa.struct({"language": pa.list_(pa.string()), "translation": pa.list_(pa.string())})

    def encode_example(self, translation_dict):
        """Validate languages and flatten one example into parallel tuples."""
        lang_set = set(self.languages)
        if self.languages and set(translation_dict) - lang_set:
            raise ValueError(
                f"Some languages in example ({', '.join(sorted(set(translation_dict) - lang_set))}) are not in valid set ({', '.join(lang_set)})."
            )

        # Convert dictionary into tuples, splitting out cases where there are
        # multiple translations for a single language.
        translation_tuples = []
        for lang, text in translation_dict.items():
            if isinstance(text, str):
                translation_tuples.append((lang, text))
            else:
                translation_tuples.extend([(lang, el) for el in text])

        # Ensure translations are in ascending order by language code.
        languages, translations = zip(*sorted(translation_tuples))
        return {"language": languages, "translation": translations}

    def flatten(self) -> Union["FeatureType", Dict[str, "FeatureType"]]:
        """Flatten into Sequence features for languages and translations."""
        from .features import Sequence, Value

        return {
            "language": Sequence(Value("string")),
            "translation": Sequence(Value("string")),
        }
| 26 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def custom_convert_ldm_vae_checkpoint(checkpoint, config):
    """Rename an LDM/CompVis VAE state dict into the diffusers AutoencoderKL layout.

    NOTE(review): the left-hand target keys were lost in the obfuscated source
    (all collapsed to `a =`); they are reconstructed from the upstream diffusers
    conversion script — confirm against `convert_vae_pt_to_diffusers.py`.
    """
    vae_state_dict = checkpoint
    new_checkpoint = {}

    # Straight renames for the stem/head convolutions and norms.
    new_checkpoint["encoder.conv_in.weight"] = vae_state_dict["encoder.conv_in.weight"]
    new_checkpoint["encoder.conv_in.bias"] = vae_state_dict["encoder.conv_in.bias"]
    new_checkpoint["encoder.conv_out.weight"] = vae_state_dict["encoder.conv_out.weight"]
    new_checkpoint["encoder.conv_out.bias"] = vae_state_dict["encoder.conv_out.bias"]
    new_checkpoint["encoder.conv_norm_out.weight"] = vae_state_dict["encoder.norm_out.weight"]
    new_checkpoint["encoder.conv_norm_out.bias"] = vae_state_dict["encoder.norm_out.bias"]
    new_checkpoint["decoder.conv_in.weight"] = vae_state_dict["decoder.conv_in.weight"]
    new_checkpoint["decoder.conv_in.bias"] = vae_state_dict["decoder.conv_in.bias"]
    new_checkpoint["decoder.conv_out.weight"] = vae_state_dict["decoder.conv_out.weight"]
    new_checkpoint["decoder.conv_out.bias"] = vae_state_dict["decoder.conv_out.bias"]
    new_checkpoint["decoder.conv_norm_out.weight"] = vae_state_dict["decoder.norm_out.weight"]
    new_checkpoint["decoder.conv_norm_out.bias"] = vae_state_dict["decoder.norm_out.bias"]
    new_checkpoint["quant_conv.weight"] = vae_state_dict["quant_conv.weight"]
    new_checkpoint["quant_conv.bias"] = vae_state_dict["quant_conv.bias"]
    new_checkpoint["post_quant_conv.weight"] = vae_state_dict["post_quant_conv.weight"]
    new_checkpoint["post_quant_conv.bias"] = vae_state_dict["post_quant_conv.bias"]

    # Retrieves the keys for the encoder down blocks only
    num_down_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "encoder.down" in layer})
    down_blocks = {
        layer_id: [key for key in vae_state_dict if f"down.{layer_id}" in key] for layer_id in range(num_down_blocks)
    }

    # Retrieves the keys for the decoder up blocks only
    num_up_blocks = len({".".join(layer.split(".")[:3]) for layer in vae_state_dict if "decoder.up" in layer})
    up_blocks = {
        layer_id: [key for key in vae_state_dict if f"up.{layer_id}" in key] for layer_id in range(num_up_blocks)
    }

    # Encoder down blocks: resnets + optional downsampler.
    for i in range(num_down_blocks):
        resnets = [key for key in down_blocks[i] if f"down.{i}" in key and f"down.{i}.downsample" not in key]
        if f"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.weight"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.weight"
            )
            new_checkpoint[f"encoder.down_blocks.{i}.downsamplers.0.conv.bias"] = vae_state_dict.pop(
                f"encoder.down.{i}.downsample.conv.bias"
            )
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"down.{i}.block", "new": f"down_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Encoder mid block (2 resnets + 1 attention).
    mid_resnets = [key for key in vae_state_dict if "encoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"encoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "encoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)

    # Decoder up blocks (note: diffusers indexes up blocks in reverse order).
    for i in range(num_up_blocks):
        block_id = num_up_blocks - 1 - i
        resnets = [
            key for key in up_blocks[block_id] if f"up.{block_id}" in key and f"up.{block_id}.upsample" not in key
        ]
        if f"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.weight"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.weight"
            ]
            new_checkpoint[f"decoder.up_blocks.{i}.upsamplers.0.conv.bias"] = vae_state_dict[
                f"decoder.up.{block_id}.upsample.conv.bias"
            ]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"up.{block_id}.block", "new": f"up_blocks.{i}.resnets"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    # Decoder mid block (2 resnets + 1 attention).
    mid_resnets = [key for key in vae_state_dict if "decoder.mid.block" in key]
    num_mid_res_blocks = 2
    for i in range(1, num_mid_res_blocks + 1):
        resnets = [key for key in mid_resnets if f"decoder.mid.block_{i}" in key]
        paths = renew_vae_resnet_paths(resnets)
        meta_path = {"old": f"mid.block_{i}", "new": f"mid_block.resnets.{i - 1}"}
        assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)

    mid_attentions = [key for key in vae_state_dict if "decoder.mid.attn" in key]
    paths = renew_vae_attention_paths(mid_attentions)
    meta_path = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
    assign_to_checkpoint(paths, new_checkpoint, vae_state_dict, additional_replacements=[meta_path], config=config)
    conv_attn_to_linear(new_checkpoint)
    return new_checkpoint
def vae_pt_to_vae_diffuser(checkpoint_path: str, output_path: str):
    """Convert a Stable Diffusion v1 VAE checkpoint (.pt/.ckpt/.safetensors)
    into a diffusers AutoencoderKL and save it to *output_path*."""
    # Only support V1
    r = requests.get(
        " https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml"
    )
    io_obj = io.BytesIO(r.content)

    original_config = OmegaConf.load(io_obj)
    image_size = 512
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if checkpoint_path.endswith("safetensors"):
        from safetensors import safe_open

        checkpoint = {}
        with safe_open(checkpoint_path, framework="pt", device="cpu") as f:
            for key in f.keys():
                checkpoint[key] = f.get_tensor(key)
    else:
        checkpoint = torch.load(checkpoint_path, map_location=device)["state_dict"]

    # Convert the VAE model.
    vae_config = create_vae_diffusers_config(original_config, image_size=image_size)
    converted_vae_checkpoint = custom_convert_ldm_vae_checkpoint(checkpoint, vae_config)

    vae = AutoencoderKL(**vae_config)
    vae.load_state_dict(converted_vae_checkpoint)
    vae.save_pretrained(output_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
    # Fixed copy-pasted help text: this flag is the output directory, not the input.
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output diffusers VAE.")
    args = parser.parse_args()

    vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 26 | 1 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
)
| 111 |
import math
def is_prime(number: int) -> bool:
    """Return True iff *number* is prime, via deterministic 6k±1 trial division."""
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False

    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True


def solution(nth: int = 10001) -> int:
    """Return the *nth* prime (1-indexed). Default 10001 is Project Euler #7.

    Raises TypeError if *nth* is not castable to int, ValueError if nth <= 0.
    """
    try:
        nth = int(nth)
    except (TypeError, ValueError):
        raise TypeError("Parameter nth must be int or castable to int.") from None
    if nth <= 0:
        raise ValueError("Parameter nth must be greater than or equal to one.")
    primes: list[int] = []
    num = 2
    while len(primes) < nth:
        if is_prime(num):
            primes.append(num)
        num += 1
    return primes[-1]


if __name__ == "__main__":
    print(f"{solution() = }")
| 111 | 1 |
from __future__ import annotations
from collections.abc import Generator
def sieve() -> Generator[int, None, None]:
    """Incremental sieve of Eratosthenes: lazily yields 2, 3, 5, 7, ... forever.

    NOTE(review): the obfuscated body lost the assignment targets and popped an
    undefined name; reconstructed to the standard incremental-sieve algorithm.
    """
    factor_map: dict[int, int] = {}
    prime = 2
    while True:
        factor = factor_map.pop(prime, None)
        if factor:
            # `prime` is composite: reschedule its factor at the next free multiple.
            x = factor + prime
            while x in factor_map:
                x += factor
            factor_map[x] = factor
        else:
            # `prime` is prime; its square is the first composite to mark.
            factor_map[prime * prime] = prime
            yield prime
        prime += 1


def solution(limit: float = 1e10) -> int:
    """Project Euler 123: least odd n whose remainder 2·n·p_n exceeds *limit*,
    where p_n is the n-th prime (even n are skipped — their remainder is 2)."""
    primes = sieve()
    n = 1
    while True:
        prime = next(primes)
        if (2 * prime * n) > limit:
            return n
        # Ignore the next prime as the reminder will be 2.
        next(primes)
        n += 2


if __name__ == "__main__":
    print(solution())
| 352 |
def molarity_to_normality(nfactor: int, moles: float, volume: float) -> float:
    """Normality = molarity (moles / volume in litres) × n-factor, rounded."""
    return round(float(moles / volume) * nfactor)


def moles_to_pressure(volume: float, moles: float, temperature: float) -> float:
    """Ideal-gas law solved for pressure: P = nRT / V, with R = 0.0821 L·atm/(mol·K)."""
    return round(float((moles * 0.0821 * temperature) / (volume)))


def moles_to_volume(pressure: float, moles: float, temperature: float) -> float:
    """Ideal-gas law solved for volume: V = nRT / P."""
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def pressure_and_volume_to_temperature(pressure: float, moles: float, volume: float) -> float:
    """Ideal-gas law solved for temperature: T = PV / (nR)."""
    return round(float((pressure * volume) / (0.0821 * moles)))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 82 | 0 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class TransformationModelOutput(ModelOutput):
    """Text-model output that additionally carries a projected embedding.

    NOTE(review): field names reconstructed from the keyword arguments used in
    RobertaSeriesModelWithTransformation.forward — confirm against upstream.
    """

    projection_state: Optional[torch.FloatTensor] = None
    last_hidden_state: torch.FloatTensor = None
    hidden_states: Optional[Tuple[torch.FloatTensor]] = None
    attentions: Optional[Tuple[torch.FloatTensor]] = None
class RobertaSeriesConfig(XLMRobertaConfig):
    """XLM-Roberta config extended with projection/pooling options used by
    RobertaSeriesModelWithTransformation (name pinned by its `config_class`)."""

    def __init__(
        self,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        project_dim=512,
        pooler_fn="cls",
        learn_encoder=False,
        use_attention_mask=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.project_dim = project_dim      # output size of the projection head
        self.pooler_fn = pooler_fn
        self.learn_encoder = learn_encoder
        self.use_attention_mask = use_attention_mask
class RobertaSeriesModelWithTransformation(RobertaPreTrainedModel):
    """XLM-Roberta encoder plus a linear projection head, used as a text encoder.

    NOTE(review): class attributes reconstructed — the obfuscated source
    collapsed their names; confirm against upstream diffusers.
    """

    _keys_to_ignore_on_load_unexpected = [r"pooler", r"logit_scale"]
    _keys_to_ignore_on_load_missing = [r"position_ids", r"predictions.decoder.bias"]
    base_model_prefix = "roberta"
    config_class = RobertaSeriesConfig

    def __init__(self, config):
        super().__init__(config)
        # Must be assigned to `self.roberta` (base_model_prefix): `base_model`
        # itself is a read-only property on PreTrainedModel.
        self.roberta = XLMRobertaModel(config)
        self.transformation = nn.Linear(config.hidden_size, config.project_dim)
        self.has_pre_transformation = getattr(config, "has_pre_transformation", False)
        if self.has_pre_transformation:
            # Project from the penultimate hidden state instead of the final one.
            self.transformation_pre = nn.Linear(config.hidden_size, config.project_dim)
            self.pre_LN = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.post_init()

    def forward(
        self,
        input_ids=None,
        attention_mask=None,
        token_type_ids=None,
        position_ids=None,
        head_mask=None,
        inputs_embeds=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        output_attentions=None,
        return_dict=None,
        output_hidden_states=None,
    ):
        """Run the encoder and return a TransformationModelOutput whose
        `projection_state` is the linear projection of the chosen hidden state."""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.base_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            head_mask=head_mask,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            output_attentions=output_attentions,
            # The pre-transformation path needs all hidden states to reach [-2].
            output_hidden_states=True if self.has_pre_transformation else output_hidden_states,
            return_dict=return_dict,
        )

        if self.has_pre_transformation:
            sequence_output = outputs["hidden_states"][-2]
            sequence_output = self.pre_LN(sequence_output)
            projection_state = self.transformation_pre(sequence_output)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
        else:
            projection_state = self.transformation(outputs.last_hidden_state)
            return TransformationModelOutput(
                projection_state=projection_state,
                last_hidden_state=outputs.last_hidden_state,
                hidden_states=outputs.hidden_states,
                attentions=outputs.attentions,
            )
| 58 |
"""simple docstring"""
import unittest
import numpy as np
from transformers import RobertaConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.roberta.modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
)
class FlaxRobertaModelTester(unittest.TestCase):
    """Builds small RobertaConfig + random inputs for the Flax Roberta tests.

    Name pinned by its use in FlaxRobertaModelTest.setUp; attribute and method
    names reconstructed from their use sites in this class.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            # NOTE(review): the obfuscated call passed an undefined name here;
            # False matches the non-decoder default — confirm upstream.
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) as expected by FlaxModelTesterMixin."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Return decoder-mode config plus encoder hidden states / mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
class FlaxRobertaModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite for the Roberta family."""

    # NOTE(review): attribute name reconstructed (obfuscated to
    # `SCREAMING_SNAKE_CASE = True`); `test_head_masking` matches upstream.
    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaModel,
            FlaxRobertaForCausalLM,
            FlaxRobertaForMaskedLM,
            FlaxRobertaForSequenceClassification,
            FlaxRobertaForTokenClassification,
            FlaxRobertaForMultipleChoice,
            FlaxRobertaForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self):
        self.model_tester = FlaxRobertaModelTester(self)

    @slow
    def test_model_from_pretrained(self):
        """Every model class loads from the PyTorch hub weights and runs forward."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("roberta-base", from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
| 171 | 0 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
# Default protobuf symbol database (standard protoc-generated preamble).
# NOTE(review): generated code normally binds this as `_sym_db` — the
# obfuscation renamed the target to `__A`.
__A : Optional[Any] = _symbol_database.Default()
__A : Union[str, Any] = _descriptor_pool.Default().AddSerializedFile(
b'\n\x19sentencepiece_model.proto\x12\rsentencepiece"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# 
\x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. \x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 
\x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03'
)
# Materialize the generated sentencepiece_model.proto descriptors into module
# scope (standard protoc/builder boilerplate).
# NOTE(review): obfuscation renamed the assignment targets, but the builder
# calls still expect the original names `_globals` (for `globals()`) and
# `DESCRIPTOR` (for the AddSerializedFile result above) — confirm before running.
__A : Dict = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'sentencepiece_model_pb2', _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    # Pure-Python descriptors: clear options, then record the byte span of each
    # message/enum inside the serialized proto file.
    # NOTE(review): every `__A` assignment below originally targeted a distinct
    # `_serialized_start` / `_serialized_end` slot of a named descriptor
    # (_TRAINERSPEC, _NORMALIZERSPEC, _SELFTESTDATA, _MODELPROTO, ...);
    # collapsed onto one name, only the last value survives.
    __A : List[Any] = None
    __A : Union[str, Any] = b'H\003'
    # (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
    # _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    __A : Dict = 45
    __A : Optional[Any] = 1_581
    __A : Dict = 1_517
    __A : Tuple = 1_570
    __A : List[Any] = 1_584
    __A : Union[str, Any] = 1_793
    __A : List[Any] = 1_795
    __A : Optional[int] = 1_916
    __A : List[str] = 1_864
    __A : List[str] = 1_905
    __A : Any = 1_919
    __A : Any = 2_429
    __A : Dict = 2_208
    __A : Optional[int] = 2_418
    __A : Optional[int] = 2_323
    __A : Union[str, Any] = 2_407
# @@protoc_insertion_point(module_scope) | 8 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
if is_torch_available():
import torch
from transformers import XLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_torch
class __UpperCamelCase ( unittest.TestCase ):
    """Integration tests: XLM-RoBERTa base/large produce the expected hidden states.

    Fix: the final line carried a dataset-table artifact (``| 8 | 1 |``)
    appended to the code, which made the file a SyntaxError; it is removed.

    NOTE(review): both test methods share the obfuscated name ``a__``, so the
    second definition shadows the first and unittest collects only one of
    them; the locals used below (``model``, ``output``, ``_UpperCamelCase``)
    are unbound obfuscation residue — confirm the original names before
    executing these slow tests.
    """

    @slow
    def a__ ( self :Dict ):
        """xlm-roberta-base: check output shape and a slice of the last hidden state."""
        snake_case_ : Optional[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-base""" )
        snake_case_ : Optional[int] = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        snake_case_ : Tuple = torch.Size((1, 1_2, 7_6_8) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Dict = torch.tensor(
            [[-0.01_01, 0.12_18, -0.08_03, 0.08_01, 0.13_27, 0.07_76, -0.12_15, 0.23_83, 0.33_38, 0.31_06, 0.03_00, 0.02_52]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.base')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : Tuple = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )

    @slow
    def a__ ( self :Union[str, Any] ):
        """xlm-roberta-large: same check with 1024-dim hidden states."""
        snake_case_ : List[Any] = XLMRobertaModel.from_pretrained("""xlm-roberta-large""" )
        snake_case_ : Dict = torch.tensor([[0, 5_8_1, 1_0_2_6_9, 8_3, 9_9_9_4_2, 1_3_6, 6_0_7_4_2, 2_3, 7_0, 8_0_5_8_3, 1_8_2_7_6, 2]] )
        # The dog is cute and lives in the garden house
        snake_case_ : List[Any] = torch.Size((1, 1_2, 1_0_2_4) )  # batch_size, sequence_length, embedding_vector_dim
        snake_case_ : Any = torch.tensor(
            [[-0.06_99, -0.03_18, 0.07_05, -0.12_41, 0.09_99, -0.05_20, 0.10_04, -0.18_38, -0.47_04, 0.14_37, 0.08_21, 0.01_26]] )
        # xlmr = torch.hub.load('pytorch/fairseq', 'xlmr.large')
        # xlmr.eval()
        # expected_output_values_last_dim = xlmr.extract_features(input_ids[0])[:, :, -1]
        with torch.no_grad():
            snake_case_ : str = model(_UpperCamelCase )["""last_hidden_state"""].detach()
        self.assertEqual(output.shape ,_UpperCamelCase )
        # compare the actual values for a slice of last dim
        self.assertTrue(torch.allclose(output[:, :, -1] ,_UpperCamelCase ,atol=1E-3 ) )
"""simple docstring"""
import os
from argparse import ArgumentParser, Namespace
from ..data import SingleSentenceClassificationProcessor as Processor
from ..pipelines import TextClassificationPipeline
from ..utils import is_tf_available, is_torch_available, logging
from . import BaseTransformersCLICommand
# Fail fast at import time: CLI training needs at least one DL backend.
if not is_tf_available() and not is_torch_available():
    raise RuntimeError('At least one of PyTorch or TensorFlow 2.0+ should be installed to use CLI training')
# TF training parameters
# NOTE(review): two distinct TF flags (presumably USE_XLA / USE_AMP) were
# collapsed onto one obfuscated name, so the second assignment overwrites
# the first — confirm against the original `commands/train.py`.
__UpperCAmelCase = False
__UpperCAmelCase = False
def _snake_case ( lowercase__ : Namespace ):
    """Factory wired into argparse via ``set_defaults(func=...)``: build the
    train command from the parsed CLI namespace.

    Fixes: the body returned the undefined name ``TrainCommand`` (NameError at
    call time) — the command class defined in this module is
    ``_SCREAMING_SNAKE_CASE``; the wrong ``-> str`` return annotation is dropped.
    """
    return _SCREAMING_SNAKE_CASE(lowercase__ )
class _SCREAMING_SNAKE_CASE ( A__ ):
    """`transformers-cli train`: fine-tune a model on a tab-separated CSV
    text-classification dataset.

    NOTE(review): obfuscation artifacts throughout — all four plain methods
    share the name `__lowerCAmelCase` (only the last definition survives on the
    class); assignment targets such as `lowerCAmelCase_` replaced the original
    local/attribute names (`train_parser`, `self.logger`, `self.pipeline`, ...)
    that the right-hand sides and later lines still reference; and the static
    method's single parameter `__A` stands in for several distinct original
    values (the parser, `str`, `True`, ...). Confirm against the original
    `transformers/commands/train.py` before executing.
    """

    @staticmethod
    def __lowerCAmelCase ( __A ) -> int:
        """Register the `train` sub-command and all of its CLI arguments."""
        lowerCAmelCase_ :List[str] = parser.add_parser("""train""" , help="""CLI tool to train a model on a task.""" )
        train_parser.add_argument(
            """--train_data""" , type=__A , required=__A , help="""path to train (and optionally evaluation) dataset as a csv with tab separated labels and sentences.""" , )
        train_parser.add_argument(
            """--column_label""" , type=__A , default=0 , help="""Column of the dataset csv file with example labels.""" )
        train_parser.add_argument(
            """--column_text""" , type=__A , default=1 , help="""Column of the dataset csv file with example texts.""" )
        train_parser.add_argument(
            """--column_id""" , type=__A , default=2 , help="""Column of the dataset csv file with example ids.""" )
        train_parser.add_argument(
            """--skip_first_row""" , action="""store_true""" , help="""Skip the first row of the csv file (headers).""" )
        train_parser.add_argument("""--validation_data""" , type=__A , default="""""" , help="""path to validation dataset.""" )
        train_parser.add_argument(
            """--validation_split""" , type=__A , default=0.1 , help="""if validation dataset is not provided, fraction of train dataset to use as validation dataset.""" , )
        train_parser.add_argument("""--output""" , type=__A , default="""./""" , help="""path to saved the trained model.""" )
        train_parser.add_argument(
            """--task""" , type=__A , default="""text_classification""" , help="""Task to train the model on.""" )
        train_parser.add_argument(
            """--model""" , type=__A , default="""bert-base-uncased""" , help="""Model's name or path to stored model.""" )
        train_parser.add_argument("""--train_batch_size""" , type=__A , default=32 , help="""Batch size for training.""" )
        train_parser.add_argument("""--valid_batch_size""" , type=__A , default=64 , help="""Batch size for validation.""" )
        train_parser.add_argument("""--learning_rate""" , type=__A , default=3E-5 , help="""Learning rate.""" )
        train_parser.add_argument("""--adam_epsilon""" , type=__A , default=1E-08 , help="""Epsilon for Adam optimizer.""" )
        # route parsed args back through the factory function above
        train_parser.set_defaults(func=__A )

    def __init__( self , __A ) -> Dict:
        """Pick the backend (TF preferred), build the task pipeline and load
        train/validation CSV data from the parsed arguments."""
        lowerCAmelCase_ :List[Any] = logging.get_logger("""transformers-cli/training""" )
        lowerCAmelCase_ :int = """tf""" if is_tf_available() else """torch"""
        os.makedirs(args.output , exist_ok=__A )
        lowerCAmelCase_ :List[Any] = args.output
        lowerCAmelCase_ :int = args.column_label
        lowerCAmelCase_ :int = args.column_text
        lowerCAmelCase_ :Union[str, Any] = args.column_id
        self.logger.info(f"""Loading {args.task} pipeline for {args.model}""" )
        if args.task == "text_classification":
            lowerCAmelCase_ :Tuple = TextClassificationPipeline.from_pretrained(args.model )
        elif args.task == "token_classification":
            raise NotImplementedError
        elif args.task == "question_answering":
            raise NotImplementedError
        self.logger.info(f"""Loading dataset from {args.train_data}""" )
        lowerCAmelCase_ :Any = Processor.create_from_csv(
            args.train_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        lowerCAmelCase_ :str = None
        if args.validation_data:
            self.logger.info(f"""Loading validation dataset from {args.validation_data}""" )
            lowerCAmelCase_ :List[Any] = Processor.create_from_csv(
                args.validation_data , column_label=args.column_label , column_text=args.column_text , column_id=args.column_id , skip_first_row=args.skip_first_row , )
        lowerCAmelCase_ :Optional[Any] = args.validation_split
        lowerCAmelCase_ :str = args.train_batch_size
        lowerCAmelCase_ :str = args.valid_batch_size
        lowerCAmelCase_ :Optional[int] = args.learning_rate
        lowerCAmelCase_ :Union[str, Any] = args.adam_epsilon

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """Dispatch to the backend-specific training entry point."""
        if self.framework == "tf":
            return self.run_tf()
        return self.run_torch()

    def __lowerCAmelCase ( self ) -> Tuple:
        """PyTorch training path — not implemented."""
        raise NotImplementedError

    def __lowerCAmelCase ( self ) -> Optional[int]:
        """TensorFlow training path: fit the pipeline and save the result."""
        self.pipeline.fit(
            self.train_dataset , validation_data=self.valid_dataset , validation_split=self.validation_split , learning_rate=self.learning_rate , adam_epsilon=self.adam_epsilon , train_batch_size=self.train_batch_size , valid_batch_size=self.valid_batch_size , )
        # Save trained pipeline
        self.pipeline.save_pretrained(self.output )
| 84 |
"""simple docstring"""
import json
import os
import tempfile
import transformers
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark scale and output location: results are written next to this
# script as <script name>.json.
# NOTE(review): the split targets were obfuscated, but the third line still
# expects the original names (`RESULTS_BASEPATH`, `RESULTS_FILENAME`), and the
# first constant was presumably `SPEED_TEST_N_EXAMPLES` — confirm before running.
_UpperCAmelCase = 5_0_0_0_0_0
_UpperCAmelCase, _UpperCAmelCase = os.path.split(__file__)
_UpperCAmelCase = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def __magic_name__ ( lowercase , **kwargs ):
    """Timed wrapper around ``Dataset.map``: run ``lowercase.map(**kwargs)`` and
    let the ``get_duration`` decorator report how long it took.

    Fixes: the dataset parameter and the kwargs catch-all were both named
    ``lowercase`` — a SyntaxError (duplicate argument) — and the body referenced
    the undefined name ``dataset``. The catch-all is now ``**kwargs`` and the
    body uses the actual parameter.
    """
    SCREAMING_SNAKE_CASE_: Union[str, Any] = lowercase.map(**kwargs )
@get_duration
def __magic_name__ ( lowercase , **kwargs ):
    """Timed wrapper around ``Dataset.filter``: run ``lowercase.filter(**kwargs)``
    under the ``get_duration`` decorator.

    Fixes: duplicate parameter name (``lowercase`` used for both the dataset and
    the kwargs catch-all) was a SyntaxError, and the body referenced the
    undefined name ``dataset``.
    """
    SCREAMING_SNAKE_CASE_: Optional[Any] = lowercase.filter(**kwargs )
def __magic_name__ ( ):
    """Benchmark `Dataset.map` / `Dataset.filter` across formats and write the
    timings to a JSON file.

    NOTE(review): heavily damaged by obfuscation — the `map(...)` / `filter(...)`
    calls below hit the Python *builtins*, not the two decorated benchmark
    functions above (which were also renamed to `__magic_name__`), and names
    such as `tmp_dir`, `features`, `dataset`, `tokenizer`, `times` survive only
    on the right-hand sides. Confirm against the original
    `benchmarks/benchmark_map_filter.py` before executing.
    """
    SCREAMING_SNAKE_CASE_: Union[str, Any] ={"""num examples""": SPEED_TEST_N_EXAMPLES}
    with tempfile.TemporaryDirectory() as tmp_dir:
        SCREAMING_SNAKE_CASE_: Optional[Any] =datasets.Features({"""text""": datasets.Value("""string""" ), """numbers""": datasets.Value("""float32""" )} )
        SCREAMING_SNAKE_CASE_: int =generate_example_dataset(
            os.path.join(lowercase , """dataset.arrow""" ) , lowercase , num_examples=lowercase )
        SCREAMING_SNAKE_CASE_: int =transformers.AutoTokenizer.from_pretrained("""bert-base-cased""" , use_fast=lowercase )
        def tokenize(lowercase ):
            # tokenizes a batch of examples; `tokenizer`/`examples` are unbound here (see NOTE)
            return tokenizer(examples["""text"""] )
        SCREAMING_SNAKE_CASE_: Tuple =map(lowercase )
        SCREAMING_SNAKE_CASE_: str =map(lowercase , batched=lowercase )
        SCREAMING_SNAKE_CASE_: Optional[int] =map(lowercase , function=lambda lowercase : None , batched=lowercase )
        with dataset.formatted_as(type="""numpy""" ):
            SCREAMING_SNAKE_CASE_: Any =map(lowercase , function=lambda lowercase : None , batched=lowercase )
        with dataset.formatted_as(type="""pandas""" ):
            SCREAMING_SNAKE_CASE_: Dict =map(lowercase , function=lambda lowercase : None , batched=lowercase )
        with dataset.formatted_as(type="""torch""" , columns="""numbers""" ):
            SCREAMING_SNAKE_CASE_: Tuple =map(lowercase , function=lambda lowercase : None , batched=lowercase )
        with dataset.formatted_as(type="""tensorflow""" , columns="""numbers""" ):
            SCREAMING_SNAKE_CASE_: int =map(lowercase , function=lambda lowercase : None , batched=lowercase )
        SCREAMING_SNAKE_CASE_: Tuple =map(lowercase , function=lowercase , batched=lowercase )
        SCREAMING_SNAKE_CASE_: List[Any] =filter(lowercase )
        # Activate later when tokenizer support batched inputs
        # with dataset.formatted_as(type='numpy'):
        #     times[func.__name__ + " fast-tokenizer batched numpy"] = func(dataset, function=tokenize, batched=True)
        with open(lowercase , """wb""" ) as f:
            f.write(json.dumps(lowercase ).encode("""utf-8""" ) )
# NOTE(review): `benchmark_map_filter` is not defined in this module — the
# obfuscation renamed the entry point above to `__magic_name__`; confirm the
# original name before running as a script.
if __name__ == "__main__":  # useful to run the profiler
    benchmark_map_filter()
| 173 | 0 |
'''simple docstring'''
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_flax_cross_test,
require_flax,
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import is_flax_available, is_torch_available, is_vision_available
from ...test_modeling_flax_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_flax_bert import FlaxBertModelTester
from ..clip.test_modeling_flax_clip import FlaxCLIPVisionModelTester
from ..vit.test_modeling_flax_vit import FlaxViTModelTester
if is_flax_available():
from transformers import (
FlaxBertModel,
FlaxCLIPVisionModel,
FlaxVisionTextDualEncoderModel,
FlaxViTModel,
VisionTextDualEncoderConfig,
VisionTextDualEncoderProcessor,
)
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
if is_torch_available():
import torch
from transformers import VisionTextDualEncoderModel
if is_vision_available():
from PIL import Image
def _lowerCAmelCase ( lowerCamelCase_ : List[str] ):
if isinstance(SCREAMING_SNAKE_CASE_ , collections.abc.Iterable ):
return x
return (x, x)
@require_flax
class __lowercase :
    """Mixin with shared checks for Flax VisionTextDualEncoder tests
    (ViT/CLIP vision tower + BERT text tower): shape checks, save/load
    round-trips, attention outputs, and PyTorch<->Flax weight equivalence.

    NOTE(review): obfuscation artifacts — every method carries the name
    `_UpperCAmelCase` (only the last definition survives on the class when it
    is created; the intent of each body is documented below), and assignment
    targets such as `__lowercase` replaced the original local names that the
    right-hand sides and later lines still reference. Confirm against the
    original test_modeling_flax_vision_text_dual_encoder.py before executing.
    """
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> List[str]:
        """Hook: build and return (vision_model, text_model) from the two configs;
        overridden by the concrete test classes below."""
        pass
    def _UpperCAmelCase (self ) -> Optional[int]:
        """Hook: prepare and return the config/inputs kwargs dict; overridden below."""
        pass
    def _UpperCAmelCase (self ) -> Dict:
        """Hook: return a pretrained (model, inputs) pair; overridden below."""
        pass
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
        """Assert that max|a - b| is within the given tolerance."""
        __lowercase = np.abs((a - b) ).max()
        self.assertLessEqual(_lowercase ,_lowercase ,f"Difference between torch and flax is {diff} (>= {tol})." )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> List[Any]:
        """Build the dual encoder from a joint config and check both projection
        output shapes."""
        __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase ,_lowercase )
        __lowercase = FlaxVisionTextDualEncoderModel(_lowercase )
        __lowercase = model(input_ids=_lowercase ,pixel_values=_lowercase ,attention_mask=_lowercase )
        self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], config.projection_dim) )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> List[Any]:
        """Build the dual encoder from two pretrained towers and check output shapes."""
        __lowercase , __lowercase = self.get_vision_text_model(_lowercase ,_lowercase )
        __lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
        __lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
        __lowercase = model(input_ids=_lowercase ,pixel_values=_lowercase ,attention_mask=_lowercase )
        self.assertEqual(output['''text_embeds'''].shape ,(input_ids.shape[0], model.config.projection_dim) )
        self.assertEqual(output['''image_embeds'''].shape ,(pixel_values.shape[0], model.config.projection_dim) )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> int:
        """Round-trip save_pretrained/from_pretrained and check outputs match
        within 1e-3."""
        __lowercase , __lowercase = self.get_vision_text_model(_lowercase ,_lowercase )
        __lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
        __lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
        __lowercase = model(input_ids=_lowercase ,pixel_values=_lowercase ,attention_mask=_lowercase )
        __lowercase = output[0]
        with tempfile.TemporaryDirectory() as tmpdirname:
            model.save_pretrained(_lowercase )
            __lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
            __lowercase = model(input_ids=_lowercase ,pixel_values=_lowercase ,attention_mask=_lowercase )
            __lowercase = after_output[0]
            __lowercase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowercase ,1E-3 )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase=None ,**_lowerCamelCase ) -> Optional[Any]:
        """Run with output_attentions=True and check attention shapes of both towers."""
        __lowercase , __lowercase = self.get_vision_text_model(_lowercase ,_lowercase )
        __lowercase = {'''vision_model''': vision_model, '''text_model''': text_model}
        __lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(**_lowercase )
        __lowercase = model(
            input_ids=_lowercase ,pixel_values=_lowercase ,attention_mask=_lowercase ,output_attentions=_lowercase )
        __lowercase = output.vision_model_output.attentions
        self.assertEqual(len(_lowercase ) ,vision_config.num_hidden_layers )
        # in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
        __lowercase = to_atuple(vision_model.config.image_size )
        __lowercase = to_atuple(vision_model.config.patch_size )
        __lowercase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        __lowercase = num_patches + 1
        self.assertEqual(vision_attentions[0].shape[-3:] ,(vision_config.num_attention_heads, seq_len, seq_len) )
        __lowercase = output.text_model_output.attentions
        self.assertEqual(len(_lowercase ) ,text_config.num_hidden_layers )
        self.assertEqual(
            text_attentions[0].shape[-3:] ,(text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) ,)
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Optional[Any]:
        """Core PT<->Flax check: compare direct outputs, then round-trip the
        weights through save/load in both directions and re-compare."""
        pt_model.to(_lowercase )
        pt_model.eval()
        # prepare inputs
        __lowercase = inputs_dict
        __lowercase = {k: torch.tensor(v.tolist() ) for k, v in flax_inputs.items()}
        with torch.no_grad():
            __lowercase = pt_model(**_lowercase ).to_tuple()
        __lowercase = fx_model(**_lowercase ).to_tuple()
        self.assertEqual(len(_lowercase ) ,len(_lowercase ) ,'''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output in zip(fx_outputs[:4] ,pt_outputs[:4] ):
            self.assert_almost_equals(_lowercase ,pt_output.numpy() ,4E-2 )
        # PT -> Flax
        with tempfile.TemporaryDirectory() as tmpdirname:
            pt_model.save_pretrained(_lowercase )
            __lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase ,from_pt=_lowercase )
        __lowercase = fx_model_loaded(**_lowercase ).to_tuple()
        self.assertEqual(len(_lowercase ) ,len(_lowercase ) ,'''Output lengths differ between Flax and PyTorch''' )
        for fx_output_loaded, pt_output in zip(fx_outputs_loaded[:4] ,pt_outputs[:4] ):
            self.assert_almost_equals(_lowercase ,pt_output.numpy() ,4E-2 )
        # Flax -> PT
        with tempfile.TemporaryDirectory() as tmpdirname:
            fx_model.save_pretrained(_lowercase )
            __lowercase = VisionTextDualEncoderModel.from_pretrained(_lowercase ,from_flax=_lowercase )
        pt_model_loaded.to(_lowercase )
        pt_model_loaded.eval()
        with torch.no_grad():
            __lowercase = pt_model_loaded(**_lowercase ).to_tuple()
        self.assertEqual(len(_lowercase ) ,len(_lowercase ) ,'''Output lengths differ between Flax and PyTorch''' )
        for fx_output, pt_output_loaded in zip(fx_outputs[:4] ,pt_outputs_loaded[:4] ):
            self.assert_almost_equals(_lowercase ,pt_output_loaded.numpy() ,4E-2 )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        """Convert a fresh PT state dict to Flax params and run the equivalence check."""
        __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase ,_lowercase )
        __lowercase = VisionTextDualEncoderModel(_lowercase )
        __lowercase = FlaxVisionTextDualEncoderModel(_lowercase )
        __lowercase = convert_pytorch_state_dict_to_flax(pt_model.state_dict() ,_lowercase )
        __lowercase = fx_state
        self.check_pt_flax_equivalence(_lowercase ,_lowercase ,_lowercase )
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ,_lowerCamelCase ) -> List[Any]:
        """Load Flax weights into a fresh PT model and run the equivalence check."""
        __lowercase = VisionTextDualEncoderConfig.from_vision_text_configs(_lowercase ,_lowercase )
        __lowercase = VisionTextDualEncoderModel(_lowercase )
        __lowercase = FlaxVisionTextDualEncoderModel(_lowercase )
        __lowercase = load_flax_weights_in_pytorch_model(_lowercase ,fx_model.params )
        self.check_pt_flax_equivalence(_lowercase ,_lowercase ,_lowercase )
    def _UpperCAmelCase (self ) -> List[str]:
        """Test: model built from joint configs produces the expected shapes."""
        __lowercase = self.prepare_config_and_inputs()
        self.check_model_from_pretrained_configs(**_lowercase )
    def _UpperCAmelCase (self ) -> Dict:
        """Test: model built from two pretrained towers produces the expected shapes."""
        __lowercase = self.prepare_config_and_inputs()
        self.check_vision_text_dual_encoder_from_pretrained(**_lowercase )
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """Test: save/load round-trip preserves outputs."""
        __lowercase = self.prepare_config_and_inputs()
        self.check_save_load(**_lowercase )
    def _UpperCAmelCase (self ) -> Tuple:
        """Test: attention outputs have the expected shapes."""
        __lowercase = self.prepare_config_and_inputs()
        self.check_vision_text_output_attention(**_lowercase )
    @is_pt_flax_cross_test
    def _UpperCAmelCase (self ) -> List[Any]:
        """Cross-framework test: PT->Flax and Flax->PT weight equivalence."""
        __lowercase = self.prepare_config_and_inputs()
        __lowercase = config_inputs_dict.pop('''vision_config''' )
        __lowercase = config_inputs_dict.pop('''text_config''' )
        __lowercase = config_inputs_dict
        self.check_equivalence_pt_to_flax(_lowercase ,_lowercase ,_lowercase )
        self.check_equivalence_flax_to_pt(_lowercase ,_lowercase ,_lowercase )
    @slow
    def _UpperCAmelCase (self ) -> Tuple:
        """Slow test: pretrained model survives a save/load round-trip (tol 1e-5)."""
        __lowercase , __lowercase = self.get_pretrained_model_and_inputs()
        __lowercase = model_a(**_lowercase )
        __lowercase = outputs[0]
        with tempfile.TemporaryDirectory() as tmp_dirname:
            model_a.save_pretrained(_lowercase )
            __lowercase = FlaxVisionTextDualEncoderModel.from_pretrained(_lowercase )
            __lowercase = model_a(**_lowercase )
            __lowercase = after_outputs[0]
            __lowercase = np.amax(np.abs(out_a - out_a ) )
            self.assertLessEqual(_lowercase ,1E-5 )
@require_flax
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    """ViT + BERT instantiation of the dual-encoder test mixin.

    NOTE(review): same obfuscation artifacts as the mixin — duplicate
    `_UpperCAmelCase` method names and unbound right-hand-side locals.
    """
    def _UpperCAmelCase (self ) -> Any:
        """Load a tiny ViT/BERT dual encoder (both towers converted from PyTorch
        weights) and build random pixel/input-id/attention-mask inputs."""
        __lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-vit''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowercase ,text_from_pt=_lowercase ,)
        __lowercase = 13
        __lowercase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        __lowercase = random_attention_mask([batch_size, 4] )
        __lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> Union[str, Any]:
        """Build raw FlaxViTModel / FlaxBertModel towers from their configs."""
        __lowercase = FlaxViTModel(_lowercase )
        __lowercase = FlaxBertModel(_lowercase )
        return vision_model, text_model
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """Combine the ViT and BERT tester configs/inputs into one kwargs dict."""
        __lowercase = FlaxViTModelTester(self )
        __lowercase = FlaxBertModelTester(self )
        __lowercase = vit_model_tester.prepare_config_and_inputs()
        __lowercase = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_torch
class __lowercase ( lowerCAmelCase__ , unittest.TestCase ):
    """CLIP-vision + BERT instantiation of the dual-encoder test mixin.

    Torch is required because the tiny CLIP vision tower is converted from
    PyTorch weights (`vision_from_pt`/`text_from_pt` below).
    NOTE(review): same obfuscation artifacts as the mixin — duplicate
    `_UpperCAmelCase` method names and unbound right-hand-side locals.
    """
    def _UpperCAmelCase (self ) -> Optional[Any]:
        """Load a tiny CLIP/BERT dual encoder from PyTorch weights and build
        random inputs."""
        __lowercase = FlaxVisionTextDualEncoderModel.from_vision_text_pretrained(
            '''hf-internal-testing/tiny-random-clip''' ,'''hf-internal-testing/tiny-bert''' ,vision_from_pt=_lowercase ,text_from_pt=_lowercase ,)
        __lowercase = 13
        __lowercase = floats_tensor(
            [
                batch_size,
                model.config.vision_config.num_channels,
                model.config.vision_config.image_size,
                model.config.vision_config.image_size,
            ] )
        __lowercase = ids_tensor([batch_size, 4] ,model.config.text_config.vocab_size )
        __lowercase = random_attention_mask([batch_size, 4] )
        __lowercase = {'''pixel_values''': pixel_values, '''input_ids''': input_ids, '''attention_mask''': attention_mask}
        return model, inputs
    def _UpperCAmelCase (self ,_lowerCamelCase ,_lowerCamelCase ) -> str:
        """Build raw FlaxCLIPVisionModel / FlaxBertModel towers from their configs."""
        __lowercase = FlaxCLIPVisionModel(_lowercase )
        __lowercase = FlaxBertModel(_lowercase )
        return vision_model, text_model
    def _UpperCAmelCase (self ) -> Any:
        """Combine the CLIP and BERT tester configs/inputs into one kwargs dict."""
        __lowercase = FlaxCLIPVisionModelTester(self )
        __lowercase = FlaxBertModelTester(self )
        __lowercase = clip_model_tester.prepare_config_and_inputs()
        __lowercase = bert_model_tester.prepare_config_and_inputs()
        __lowercase , __lowercase = vision_config_and_inputs
        __lowercase , __lowercase , __lowercase , __lowercase = text_config_and_inputs
        # make sure that cross attention layers are added
        return {
            "text_config": text_config,
            "vision_config": vision_config,
            "pixel_values": pixel_values,
            "attention_mask": attention_mask,
            "input_ids": input_ids,
            "token_type_ids": token_type_ids,
        }
@require_flax
@require_vision
class __lowercase ( unittest.TestCase ):
    """Integration test against the public `clip-italian/clip-italian` checkpoint.

    NOTE(review): assignment targets were obfuscated; `processor`, `model`,
    `inputs`, `outputs` and `_lowercase` below are unbound residue of the
    original local names.
    """
    @slow
    def _UpperCAmelCase (self ) -> str:
        """End-to-end: processor + model yield the expected logit shapes and values."""
        __lowercase = FlaxVisionTextDualEncoderModel.from_pretrained('''clip-italian/clip-italian''' ,logit_scale_init_value=1.0 )
        __lowercase = VisionTextDualEncoderProcessor.from_pretrained('''clip-italian/clip-italian''' )
        __lowercase = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
        __lowercase = processor(
            text=['''una foto di un gatto''', '''una foto di un cane'''] ,images=_lowercase ,padding=_lowercase ,return_tensors='''np''' )
        __lowercase = model(**_lowercase )
        # verify the logits
        self.assertEqual(outputs.logits_per_image.shape ,(inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
        self.assertEqual(
            outputs.logits_per_text.shape ,(inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) ,)
        __lowercase = np.array([[1.2_2_8_4_7_2_7, 0.3_1_0_4_1_2_2]] )
        self.assertTrue(np.allclose(outputs.logits_per_image ,_lowercase ,atol=1E-3 ) )
| 366 |
'''simple docstring'''
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class __lowercase ( lowerCAmelCase__ ):
    """Output container for the semantic Stable Diffusion pipeline.

    NOTE(review): both fields were obfuscated to the same name `a`, so only the
    second annotation survives in `__annotations__`; presumably these were the
    generated `images` and the per-image `nsfw_content_detected` flags — confirm
    against the original pipeline output class.
    """
    # generated images: a list of PIL images or a single numpy array
    a : Union[List[PIL.Image.Image], np.ndarray]
    # per-image safety flags, or None when the safety checker is disabled
    a : Optional[List[bool]]
# the pipeline itself needs both transformers and torch at import time
if is_transformers_available() and is_torch_available():
    from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
| 217 | 0 |
from __future__ import absolute_import, division, print_function, unicode_literals
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers import RobertaConfig
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.roberta.modeling_roberta import (
ROBERTA_INPUTS_DOCSTRING,
ROBERTA_START_DOCSTRING,
RobertaEmbeddings,
)
from .modeling_highway_bert import BertPreTrainedModel, DeeBertModel, HighwayException, entropy
@add_start_docstrings(
    '''The RoBERTa Model transformer with early exiting (DeeRoBERTa). ''' , ROBERTA_START_DOCSTRING , )
class __snake_case ( DeeBertModel ):
    """DeeRoBERTa encoder: DeeBERT's early-exit (highway) transformer stack with
    RoBERTa embeddings and configuration.

    Fixes: the decorator argument and the base class were both the undefined
    name ``a`` (NameError at import); this module's imports provide
    ``ROBERTA_START_DOCSTRING`` and ``DeeBertModel``, which match the documented
    intent. ``__init__`` also stored the embeddings in a dead local instead of
    on the module; it now assigns ``self.embeddings`` (assumes the DeeBERT base
    reads that attribute — confirm against modeling_highway_bert).
    """

    # model-configuration hooks; NOTE(review): both attributes share the
    # obfuscated name, so the second assignment overwrites the first
    # (originally `config_class` and `base_model_prefix`).
    UpperCAmelCase__ : List[str] = RobertaConfig
    UpperCAmelCase__ : str = '''roberta'''

    def __init__( self : Dict , _snake_case : Union[str, Any]):
        """Initialize the DeeBERT backbone, swap in RoBERTa embeddings, init weights."""
        super().__init__(_snake_case)
        self.embeddings = RobertaEmbeddings(_snake_case)
        self.init_weights()
@add_start_docstrings(
    '''RoBERTa Model (with early exiting - DeeRoBERTa) with a classifier on top,
    also takes care of multi-layer training. ''' , a , )
class __snake_case ( a ):
    """DeeRoBERTa with a sequence-classification head and highway-exit training.

    NOTE(review): obfuscation artifacts — the base class and the decorator
    argument are the undefined name `a` (originally presumably
    `BertPreTrainedModel` / `ROBERTA_START_DOCSTRING`); `DeeRobertaModel` below
    refers to the pre-obfuscation name of the encoder class above; and the
    `UpperCAmelCase_` assignments in `__init__` replaced `self.num_labels`,
    `self.num_layers`, `self.roberta`, `self.dropout`, `self.classifier`,
    which `forward` still reads. Confirm against the original
    modeling_highway_roberta.py before executing.
    """
    # model-configuration hooks (originally `config_class` / `base_model_prefix`;
    # the duplicate name means the second assignment overwrites the first)
    UpperCAmelCase__ : Any = RobertaConfig
    UpperCAmelCase__ : Dict = '''roberta'''
    def __init__( self : int , _snake_case : Tuple):
        """Build the DeeRoBERTa encoder plus dropout and linear classifier head."""
        super().__init__(_snake_case)
        UpperCAmelCase_ = config.num_labels
        UpperCAmelCase_ = config.num_hidden_layers
        UpperCAmelCase_ = DeeRobertaModel(_snake_case)
        UpperCAmelCase_ = nn.Dropout(config.hidden_dropout_prob)
        UpperCAmelCase_ = nn.Linear(config.hidden_size , self.config.num_labels)
@add_start_docstrings_to_model_forward(_snake_case)
def lowerCamelCase ( self : Union[str, Any] , _snake_case : Union[str, Any]=None , _snake_case : Optional[int]=None , _snake_case : Tuple=None , _snake_case : Any=None , _snake_case : List[str]=None , _snake_case : int=None , _snake_case : int=None , _snake_case : Union[str, Any]=-1 , _snake_case : List[str]=False , ):
"""simple docstring"""
UpperCAmelCase_ = self.num_layers
try:
UpperCAmelCase_ = self.roberta(
_snake_case , attention_mask=_snake_case , token_type_ids=_snake_case , position_ids=_snake_case , head_mask=_snake_case , inputs_embeds=_snake_case , )
UpperCAmelCase_ = outputs[1]
UpperCAmelCase_ = self.dropout(_snake_case)
UpperCAmelCase_ = self.classifier(_snake_case)
UpperCAmelCase_ = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCAmelCase_ = e.message
UpperCAmelCase_ = e.exit_layer
UpperCAmelCase_ = outputs[0]
if not self.training:
UpperCAmelCase_ = entropy(_snake_case)
UpperCAmelCase_ = []
UpperCAmelCase_ = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ = MSELoss()
UpperCAmelCase_ = loss_fct(logits.view(-1) , labels.view(-1))
else:
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(logits.view(-1 , self.num_labels) , labels.view(-1))
# work with highway exits
UpperCAmelCase_ = []
for highway_exit in outputs[-1]:
UpperCAmelCase_ = highway_exit[0]
if not self.training:
highway_logits_all.append(_snake_case)
highway_entropy.append(highway_exit[2])
if self.num_labels == 1:
# We are doing regression
UpperCAmelCase_ = MSELoss()
UpperCAmelCase_ = loss_fct(highway_logits.view(-1) , labels.view(-1))
else:
UpperCAmelCase_ = CrossEntropyLoss()
UpperCAmelCase_ = loss_fct(highway_logits.view(-1 , self.num_labels) , labels.view(-1))
highway_losses.append(_snake_case)
if train_highway:
UpperCAmelCase_ = (sum(highway_losses[:-1]),) + outputs
# exclude the final highway, of course
else:
UpperCAmelCase_ = (loss,) + outputs
if not self.training:
UpperCAmelCase_ = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCAmelCase_ = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), entropy
| 51 |
"""simple docstring"""
import copy
import os
import cva
import numpy as np
from matplotlib import pyplot as plt
class ConstantStretch:
    """Histogram-equalization style contrast stretch of a grayscale image.

    NOTE(review): restored from an obfuscated class in which every
    ``self.*`` assignment was bound to a throwaway local and all three
    methods shared one name (so only the last survived). The name
    ``ConstantStretch`` and the method names are those the ``__main__``
    block below actually calls. ``cva`` is this file's import and appears
    to be OpenCV (imread/imwrite/imshow API) — confirm.
    """

    def __init__(self):
        self.img = ""               # equalized image (set by stretch)
        self.original_image = ""    # untouched copy for side-by-side display
        self.last_list = []         # per-intensity remapping table
        self.rem = 0
        self.L = 256                # number of gray levels
        self.sk = 0                 # running CDF
        self.k = 0                  # total pixel count (histogram sum)
        self.number_of_rows = 0
        self.number_of_cols = 0

    def stretch(self, input_image):
        """Read *input_image* (grayscale), equalize it in place and save it."""
        self.img = cva.imread(input_image, 0)
        self.original_image = copy.deepcopy(self.img)
        x, _, _ = plt.hist(self.img.ravel(), 256, [0, 256], label="x")
        self.k = np.sum(x)
        for i in range(len(x)):
            prk = x[i] / self.k          # probability of intensity i
            self.sk += prk               # cumulative distribution
            last = (self.L - 1) * self.sk
            if self.rem != 0:
                self.rem = int(last % last)
            last = int(last + 1 if self.rem >= 0.5 else last)
            self.last_list.append(last)
        # Image dimensions are loop-invariant; compute them once.
        self.number_of_rows = int(np.ma.count(self.img) / self.img[1].size)
        self.number_of_cols = self.img[1].size
        for i in range(self.number_of_cols):
            for j in range(self.number_of_rows):
                num = self.img[j][i]
                if num != self.last_list[num]:
                    self.img[j][i] = self.last_list[num]
        cva.imwrite("output_data/output.jpg", self.img)

    def plot_histogram(self):
        """Plot the histogram of the (equalized) image."""
        plt.hist(self.img.ravel(), 256, [0, 256])

    def show_image(self):
        """Display output next to the original input for 5 seconds."""
        cva.imshow("Output-Image", self.img)
        cva.imshow("Input-Image", self.original_image)
        cva.waitKey(5_000)
        cva.destroyAllWindows()
if __name__ == "__main__":
    # Resolve the input image relative to this script's directory.
    # (The obfuscated code used os.path.basename(__file__), which would
    # build a path out of the script's file name, and bound the results to
    # throwaway names so `file_path`/`stretcher` were never defined.)
    file_path = os.path.join(os.path.dirname(__file__), "image_data/input.jpg")
    stretcher = ConstantStretch()
    stretcher.stretch(file_path)
    stretcher.plot_histogram()
    stretcher.show_image()
| 268 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}

# The slow tokenizer needs sentencepiece; register it only when available.
try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]

# The fast tokenizer needs the `tokenizers` package.
try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]

if TYPE_CHECKING:
    from .processing_layoutxlm import LayoutXLMProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm import LayoutXLMTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy so heavy dependencies are only
    # imported on first attribute access. The obfuscated code assigned the
    # _LazyModule to a throwaway name (and `_import_structure` was never
    # defined), so the lazy machinery did nothing.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 6 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

# Pretrained file locations / sizes / init kwargs per DPR checkpoint family.
# The obfuscated code bound every one of these to `a_`, while the tokenizer
# classes below reference the names restored here.
CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-ctx_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-ctx_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-question_encoder-single-nq-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-question_encoder-multiset-base": (
            "https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
        ),
    },
    "tokenizer_file": {
        "facebook/dpr-reader-single-nq-base": (
            "https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
        ),
        "facebook/dpr-reader-multiset-base": (
            "https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
        ),
    },
}

CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-ctx_encoder-single-nq-base": 512,
    "facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-question_encoder-single-nq-base": 512,
    "facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "facebook/dpr-reader-single-nq-base": 512,
    "facebook/dpr-reader-multiset-base": 512,
}

CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
    "facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
    "facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer for the DPR context encoder.

    Identical to BertTokenizerFast; only the pretrained-file tables and the
    matching slow tokenizer class differ.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRContextEncoderTokenizer
class DPRQuestionEncoderTokenizerFast(BertTokenizerFast):
    """Fast (Rust-backed) tokenizer for the DPR question encoder.

    Identical to BertTokenizerFast; only the pretrained-file tables and the
    matching slow tokenizer class differ.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = DPRQuestionEncoderTokenizer
# Structured results produced by DPRReaderTokenizerFast.decode_best_spans.
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

# Raw reader-model outputs consumed by decode_best_spans.
DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])


CUSTOM_DPR_READER_DOCSTRING = R"""
    Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.
    It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),
    using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`
    with the format:

    [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>

    Args:
        questions (`str` or `List[str]`):
            The questions to be encoded. You can specify one question for many passages. In this case, the question
            will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in
            `titles` or `texts`.
        titles (`str` or `List[str]`):
            The passages titles to be encoded. This can be a string or a list of strings if there are several passages.
        texts (`str` or `List[str]`):
            The passages texts to be encoded. This can be a string or a list of strings if there are several passages.
        padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):
            Activates and controls padding. Accepts the following values:

            - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence
              if provided).
            - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided.
            - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different
              lengths).
        truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):
            Activates and controls truncation. Accepts the following values:

            - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to
              the maximum acceptable input length for the model if that argument is not provided. This will truncate
              token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch
              of pairs) is provided.
            - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the first
              sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum
              acceptable input length for the model if that argument is not provided. This will only truncate the
              second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.
            - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths
              greater than the model maximum admissible input size).
        max_length (`int`, *optional*):
            Controls the maximum length to use by one of the truncation/padding parameters.

            If left unset or set to `None`, this will use the predefined model maximum length if a maximum length
            is required by one of the truncation/padding parameters. If the model has no specific maximum input
            length (like XLNet) truncation/padding to a maximum length will be deactivated.
        return_tensors (`str` or [`~utils.TensorType`], *optional*):
            If set, will return tensors instead of list of python integers. Acceptable values are:

            - `'tf'`: Return TensorFlow `tf.constant` objects.
            - `'pt'`: Return PyTorch `torch.Tensor` objects.
            - `'np'`: Return Numpy `np.ndarray` objects.
        return_attention_mask (`bool`, *optional*):
            Whether or not to return the attention mask. If not set, will return the attention mask according to the
            specific tokenizer's default, defined by the `return_outputs` attribute.

            [What are attention masks?](../glossary#attention-mask)

    Return:
        `Dict[str, List[List[int]]]`: A dictionary with the following keys:

        - `input_ids`: List of token ids to be fed to a model.
        - `attention_mask`: List of indices specifying which tokens should be attended to by the model.
    """
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    """Reader-specific encoding/decoding mixed into the DPR reader tokenizer.

    NOTE(review): restored from an obfuscated block in which every local was
    bound to ``lowerCamelCase_`` while later lines referenced the real names
    (``scores``, ``input_ids``, ``chosen_span_intervals``, and a sort lambda
    whose body used an undefined ``x``), so nothing here could run.
    """

    def __call__(
        self,
        questions,
        titles=None,
        texts=None,
        padding=False,
        truncation=False,
        max_length=None,
        return_tensors=None,
        return_attention_mask=None,
        **kwargs,
    ) -> BatchEncoding:
        """Encode question/title/text triples as `[CLS] q [SEP] title [SEP] text`."""
        if titles is None and texts is None:
            # Plain single-sequence encoding; defer entirely to the base class.
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            # Only one of titles/texts given: treat it as the second sequence.
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        # One question may be broadcast against many passages.
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        assert len(titles) == len(
            texts
        ), f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)

    def decode_best_spans(
        self,
        reader_input,
        reader_output,
        num_spans=16,
        max_answer_length=64,
        num_spans_per_passage=4,
    ):
        """Pick the best answer spans from reader logits, best passages first."""
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)
            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]

    def _get_best_spans(
        self,
        start_logits,
        end_logits,
        max_answer_length,
        top_spans,
    ):
        """Find the `top_spans` highest-scoring non-overlapping spans."""
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            assert start_index <= end_index, f"Wrong span indices: [{start_index}:{end_index}]"
            length = end_index - start_index + 1
            assert length <= max_answer_length, f"Span is too long: {length} > {max_answer_length}"
            # Skip any span that overlaps an already-chosen (higher-scoring) one.
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizerFast(CustomDPRReaderTokenizerMixin, BertTokenizerFast):
    """Fast DPR reader tokenizer: BertTokenizerFast plus the reader-specific
    encoding/span-decoding helpers from CustomDPRReaderTokenizerMixin."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DPRReaderTokenizer
| 6 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds small UperNet configs/inputs for the unit tests below.

    NOTE(review): restored from an obfuscated class whose ``self.*``
    assignments were bound to throwaway locals and whose unpacking used a
    syntactically invalid annotated tuple target. The name is required by
    ``setUp`` in the test class (``UperNetModelTester(self)``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests for UperNet.

    UperNet does not use inputs_embeds and does not have a base model, so
    several common tests are skipped below. Method names restored so the
    unittest/ModelTesterMixin machinery can discover them (the obfuscated
    class defined every method under the same name).
    """

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download and return the ADE20k fixture image as an RGB PIL image.

    Restored name: the integration tests below call ``prepare_img()``; the
    obfuscated version also passed an undefined name to ``Image.open``.
    """
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against released UperNet checkpoints."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 139 |
'''simple docstring'''
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class CTRLTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """BPE tokenization tests for the CTRL tokenizer.

    NOTE(review): restored from an obfuscated class whose base ``_a`` was
    undefined and whose locals (``vocab_tokens``, ``merges``) were bound to
    throwaway names; method names must match what unittest and
    TokenizerTesterMixin discover (``setUp``, ``get_tokenizer``, ...).
    """

    tokenizer_class = CTRLTokenizer
    test_rust_tokenizer = False
    test_seq2seq = False

    def setUp(self):
        super().setUp()

        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = ["adapt", "re@@", "a@@", "apt", "c@@", "t", "<unk>"]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))

        merges = ["#version: 0.2", "a p", "ap t</w>", "r e", "a d", "ad apt</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

    def get_tokenizer(self, **kwargs):
        kwargs.update(self.special_tokens_map)
        return CTRLTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "adapt react readapt apt"
        output_text = "adapt react readapt apt"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = CTRLTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)
        text = "adapt react readapt apt"
        bpe_tokens = "adapt re@@ a@@ c@@ t re@@ adapt apt".split()
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)

        input_tokens = tokens + [tokenizer.unk_token]
        input_bpe_tokens = [0, 1, 2, 4, 5, 1, 0, 3, 6]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)
| 139 | 1 |
'''simple docstring'''
import argparse
import os
from accelerate.test_utils import execute_subprocess_async
def test_command_parser(subparsers=None):
    """Build the argument parser for the `accelerate test` command.

    Fixes: the function was defined as `a__` but is called as
    `test_command_parser`; the body referenced the undefined names
    `subparsers`/`parser`; and `--config_file` defaulted to the function
    argument instead of None.

    Args:
        subparsers: optional argparse subparsers object to attach to; when
            None a standalone parser is created.
    Returns:
        The configured argparse parser.
    """
    if subparsers is not None:
        parser = subparsers.add_parser("test")
    else:
        parser = argparse.ArgumentParser("Accelerate test command")

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        # Route `accelerate test ...` to the test_command entry point.
        parser.set_defaults(func=test_command)
    return parser
def test_command(args):
    """Run the accelerate smoke-test script via `accelerate-launch`.

    Fixes: function was defined as `a__` but called as `test_command`; locals
    were assigned to `_UpperCamelCase` while references used the real names
    (`script_name`, `test_args`, `result`).
    """
    # Path to <package root>/test_utils/scripts/test_script.py, relative to this file.
    script_name = os.path.sep.join(__file__.split(os.path.sep)[:-2] + ["test_utils", "scripts", "test_script.py"])

    if args.config_file is None:
        test_args = script_name
    else:
        test_args = f"--config_file={args.config_file} {script_name}"

    cmd = ["accelerate-launch"] + test_args.split()
    result = execute_subprocess_async(cmd, env=os.environ.copy())
    if result.returncode == 0:
        print("Test is a success! You are ready for your distributed training!")
def main():
    """CLI entry point: parse args and run the accelerate test command.

    Fixes: defined as `a__` but invoked as `main()` by the guard below; locals
    restored from the names the body actually references.
    """
    parser = test_command_parser()
    args = parser.parse_args()
    test_command(args)


if __name__ == "__main__":
    main()
| 368 |
'''simple docstring'''
from cva import destroyAllWindows, imread, imshow, waitKey
def convert_to_negative(img):
    """Convert an RGB image array to its negative, in place, and return it.

    Fixes: defined as `a__` but called as `convert_to_negative`; the parameter
    was named `lowercase` while the body referenced `img`; the per-pixel
    result was assigned to a throwaway local instead of back into the image.

    Args:
        img: HxWx3 pixel array (e.g. as returned by cv2.imread).
    Returns:
        The same array with every channel value replaced by 255 - value.
    """
    height, width = img.shape[0], img.shape[1]
    # converting each pixel's color to its negative
    for i in range(height):
        for j in range(width):
            img[i][j] = [255, 255, 255] - img[i][j]
    return img
if __name__ == "__main__":
    # read original image (flag 1 = load as color)
    img = imread("image_data/lena.jpg", 1)

    # convert to its negative (in place; returned array is the same object)
    negative = convert_to_negative(img)

    # show result image until a key is pressed
    imshow("negative of original image", negative)
    waitKey(0)
    destroyAllWindows()
| 287 | 0 |
'''simple docstring'''
def solution() -> int:
    """Project Euler 19: count the Sundays that fell on the first of a month
    during the twentieth century (1 Jan 1901 to 31 Dec 2000).

    Fixes: defined as `_lowerCAmelCase` but called as `solution()`; every
    local was assigned to `_SCREAMING_SNAKE_CASE` while the loop referenced
    the real names (`day`, `month`, `year`, `sundays`, `days_per_month`).
    """
    days_per_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
    day = 6  # 6 Jan 1901 was the first Sunday of the century
    month = 1
    year = 1901
    sundays = 0

    while year < 2001:
        day += 7  # advance one week, Sunday to Sunday
        if (year % 4 == 0 and year % 100 != 0) or (year % 400 == 0):
            # leap year: February has 29 days
            if day > days_per_month[month - 1] and month != 2:
                month += 1
                day = day - days_per_month[month - 2]
            elif day > 29 and month == 2:
                month += 1
                day = day - 29
        else:
            if day > days_per_month[month - 1]:
                month += 1
                day = day - days_per_month[month - 2]
        if month > 12:
            year += 1
            month = 1
        if year < 2001 and day == 1:
            sundays += 1
    return sundays


if __name__ == "__main__":
    print(solution())
| 47 |
def generate_large_matrix() -> list[list[int]]:
    """Build a 1000-row grid sorted in decreasing order along rows and columns."""
    return [list(range(1_0_0_0 - i, -1_0_0_0 - i, -1)) for i in range(1_0_0_0)]


# All helpers below were defined as `__a`, so each definition clobbered the
# previous one, and the module-level results were assigned to `A_` while the
# code referenced `grid` / the real function names. Names restored from the
# call sites (L4125 `generate_large_matrix()`, the benchmark setup string, etc.).
grid = generate_large_matrix()
test_grids = (
    [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]],
    [[3, 2], [1, 0]],
    [[7, 7, 6]],
    [[7, 7, 6], [-1, -2, -3]],
    grid,
)


def validate_grid(grid: list[list[int]]) -> None:
    """Assert that every row and every column of *grid* is sorted decreasing."""
    assert all(row == sorted(row, reverse=True) for row in grid)
    assert all(list(col) == sorted(col, reverse=True) for col in zip(*grid))


def find_negative_index(array: list[int]) -> int:
    """Return the index of the first negative value in a descending-sorted
    array, using binary search; returns len(array) when nothing is negative."""
    left = 0
    right = len(array) - 1

    # Edge cases such as no values or all numbers are negative.
    if not array or array[0] < 0:
        return 0

    while right + 1 > left:
        mid = (left + right) // 2
        num = array[mid]

        # Num must be negative and the index must be greater than or equal to 0.
        if num < 0 and array[mid - 1] >= 0:
            return mid
        if num >= 0:
            left = mid + 1
        else:
            right = mid - 1
    # No negative numbers so return the last index of the array + 1 which is the length.
    return len(array)


def count_negatives_binary_search(grid: list[list[int]]) -> int:
    """Count negatives with one shrinking binary-search bound per row: O(m log n)."""
    total = 0
    bound = len(grid[0])

    for i in range(len(grid)):
        # Rows are sorted, so the negative region can only grow going down.
        bound = find_negative_index(grid[i][:bound])
        total += bound
    return (len(grid) * len(grid[0])) - total


def count_negatives_brute_force(grid: list[list[int]]) -> int:
    """Count negatives by scanning every element: O(m*n)."""
    return len([number for row in grid for number in row if number < 0])


def count_negatives_brute_force_with_break(grid: list[list[int]]) -> int:
    """Brute force, but stop each row at its first negative (rows are sorted)."""
    total = 0
    for row in grid:
        for i, number in enumerate(row):
            if number < 0:
                total += len(row) - i
                break
    return total


def benchmark() -> None:
    """Time the three counting strategies on the large generated grid."""
    from timeit import timeit

    print("Running benchmarks")
    setup = (
        "from __main__ import count_negatives_binary_search, "
        "count_negatives_brute_force, count_negatives_brute_force_with_break, grid"
    )
    for func in (
        "count_negatives_binary_search",  # took 0.7727 seconds
        "count_negatives_brute_force_with_break",  # took 4.6505 seconds
        "count_negatives_brute_force",  # took 12.8160 seconds
    ):
        time = timeit(f"{func}(grid=grid)", setup=setup, number=5_0_0)
        print(f"{func}() took {time:0.4f} seconds")


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    benchmark()
| 333 | 0 |
"""simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Maps pip-style comparison operators to their Python implementations.
# Fix: was assigned to `A_` while the version-check helpers reference `ops`.
ops = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _compare_versions(op, got_ver, want_ver, requirement, pkg, hint):
    """Compare two version strings with the operator named by *op*.

    Fixes: all six parameters were named `snake_case__` (a SyntaxError —
    duplicate argument names) while the body referenced the real names; the
    function itself was defined as `UpperCAmelCase__` but is called as
    `_compare_versions` by require_version.

    Raises:
        ValueError: when either version string is missing.
        ImportError: when the installed version does not satisfy the requirement.
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            f"Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider"
            f" reinstalling {pkg}."
        )
    if not ops[op](version.parse(got_ver), version.parse(want_ver)):
        raise ImportError(
            f"{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}"
        )
def require_version(requirement: str, hint: Optional[str] = None) -> None:
    """Verify that a pip-style requirement (e.g. ``tokenizers==0.9.4`` or
    ``python>=3.7,<4``) is satisfied by the installed version; raise otherwise.

    Fixes: duplicate `snake_case__` parameter names (SyntaxError) and locals
    assigned to `_snake_case` while references used the real names
    (`pkg`, `wanted`, `got_ver`, ...); function name restored from its call
    site in require_version_core.

    Args:
        requirement: pip-format requirement string; multiple specifiers may
            be combined with commas.
        hint: extra text appended to error messages.
    """
    hint = f"\n{hint}" if hint is not None else ""

    # non-versioned check: a bare package name just has to be installed
    if re.match(r"^[\w_\-\d]+$", requirement):
        pkg, op, want_ver = requirement, None, None
    else:
        match = re.findall(r"^([^!=<>\s]+)([\s!=<>]{1,2}.+)", requirement)
        if not match:
            raise ValueError(
                "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but"
                f" got {requirement}"
            )
        pkg, want_full = match[0]
        want_range = want_full.split(",")  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(r"^([\s!=<>]{1,2})(.+)", w)
            if not match:
                raise ValueError(
                    "requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,"
                    f" but got {requirement}"
                )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(f"{requirement}: need one of {list(ops.keys())}, but got {op}")

    # special case: "python" is checked against the running interpreter
    if pkg == "python":
        got_ver = ".".join([str(x) for x in sys.version_info[:3]])
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            f"The '{requirement}' distribution was not found and is required by this application. {hint}"
        )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver, requirement, pkg, hint)
def require_version_core(requirement):
    """require_version with a hint about updating transformers; intended for
    checks on core dependencies.

    Fix: the three helpers in this module were all defined as
    `UpperCAmelCase__`, so later defs clobbered earlier ones; restored the
    conventional name and the local/callee names the body references.
    """
    hint = "Try: pip install transformers -U or pip install -e '.[dev]' if you're working with git main"
    return require_version(requirement, hint)
| 132 |
"""simple docstring"""
def solution(n: int = 10_00) -> int:
    """Project Euler 57: in the first *n* expansions of the continued fraction
    for sqrt(2), count the fractions whose numerator has more digits than the
    denominator.

    Fixes: defined as `UpperCAmelCase__` but called as `solution()`; locals
    were assigned to `_snake_case` while references used the real names
    (`prev_numerator`, `numerator`, `result`, ...).
    """
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1, n + 1):
        # next expansion: (p + 2q) / (p + q)
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator)) > len(str(denominator)):
            result.append(i)
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result)


if __name__ == "__main__":
    print(f"{solution() = }")
| 132 | 1 |
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
# Module logger; the weight-loading helpers below reference it as `logger`
# (fix: it was assigned to `lowerCamelCase__`).
logger = logging.get_logger(__name__)
# fairseq weight-name fragments -> HF module paths ("*" is the layer index).
# Fix: both tables were assigned to `lowerCamelCase__` while the loaders
# reference `MAPPING` and `TOP_LEVEL_KEYS`.
MAPPING = {
    "post_extract_proj": "feature_projection.projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "encoder.layer_norm",
    "adapter_layer": "encoder.layers.*.adapter_layer",
    "w2v_model.layer_norm": "feature_projection.layer_norm",
    "quantizer.weight_proj": "quantizer.weight_proj",
    "quantizer.vars": "quantizer.codevectors",
    "project_q": "project_q",
    "final_proj": "project_hid",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
    "pooling_layer.linear": "projector",
    "pooling_layer.projection": "classifier",
}
# HF modules that live at the top level of the model (no "wav2vec2." prefix).
TOP_LEVEL_KEYS = [
    "lm_head",
    "quantizer.weight_proj",
    "quantizer.codevectors",
    "project_q",
    "project_hid",
    "projector",
    "classifier",
]
def read_txt_into_dict(filename):
    """Read a label file into {line_number: first_whitespace_token}.

    Blank lines are skipped but still consume a line number, so the line
    index doubles as the class id.

    Fixes: defined as `UpperCAmelCase_` but called as `read_txt_into_dict`;
    the loop iterated over the filename argument instead of the opened file;
    the stripped line was assigned to a throwaway local.
    """
    result = {}
    with open(filename, "r") as file:
        for line_number, line in enumerate(file):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    """Walk *key* (dot path) into the HF model and copy *value* onto the
    attribute selected by *weight_type* ("weight", "weight_g", "weight_v",
    "bias", "param", or None for a raw parameter).

    Fixes: every assignment target was garbled to `SCREAMING_SNAKE_CASE_`
    while references used the real names (`hf_pointer`, `hf_shape`, ...);
    restored names so the shape check and the copies actually operate on the
    resolved attribute.
    """
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    # Adapter-style weights are addressed by an extra mapped sub-path.
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer, weight_type).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split("."):
            shape_pointer = getattr(shape_pointer, attribute)
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split("."):
            hf_pointer = getattr(hf_pointer, attribute)
        hf_pointer.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def rename_dict(key, value, full_name, weight_type, hf_dict):
    """Like set_recursively, but writes into a flat dict keyed by the full
    HF parameter path instead of into a model object.

    Fixes: garbled assignment targets restored (`hf_param_name`, `full_key`);
    name restored from the call site in load_wavaveca_layer.
    """
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key):
            hf_param_name = PARAM_MAPPING[full_name.split(".")[-1]]
            weight_type = "param"

    if weight_type is not None and weight_type != "param":
        full_key = ".".join([key, weight_type])
    elif weight_type is not None and weight_type == "param":
        full_key = ".".join([key, hf_param_name])
    else:
        full_key = key

    # lm_head keeps its leading dim; everything else drops the ensemble dim.
    hf_dict[full_key] = value if "lm_head" in full_key else value[0]
# Adapter parameter name fragments -> HF sub-paths.
# Fix: was assigned to `lowerCamelCase__` while the helpers reference
# `PARAM_MAPPING`.
PARAM_MAPPING = {
    "W_a": "linear_1.weight",
    "W_b": "linear_2.weight",
    "b_a": "linear_1.bias",
    "b_b": "linear_2.bias",
    "ln_W": "norm.weight",
    "ln_b": "norm.bias",
}
def load_wavaveca_layer(name, value, hf_model=None, hf_dict=None):
    """Route one fairseq weight to the HF model (or to *hf_dict*); return
    True when the weight matched a known mapping.

    Fixes: garbled assignment targets restored (`mapped_key`, `is_used`,
    `weight_type`, ...); name restored from the call site in
    recursively_load_weights.
    """
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = "wav2vec2." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
            is_used = True
            if "*" in mapped_key:
                # The layer index sits just before the matched fragment.
                layer_index = name.split(key)[0].split(".")[-2]
                mapped_key = mapped_key.replace("*", layer_index)
            if "weight_g" in name:
                weight_type = "weight_g"
            elif "weight_v" in name:
                weight_type = "weight_v"
            elif "bias" in name:
                weight_type = "bias"
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = "weight"
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key, value, name, weight_type, hf_dict)
            else:
                set_recursively(hf_model, mapped_key, value, name, weight_type)
            return is_used
    return is_used
def recursively_load_weights(fairseq_model, hf_model, is_headless):
    """Copy every tensor from the fairseq state dict into the HF model,
    logging any weights that matched no mapping.

    Note: ``is_headless`` is accepted for call-site compatibility but is not
    read in this body.

    Fixes: garbled assignment targets restored (`unused_weights`,
    `fairseq_dict`, `feature_extractor`, `is_used`).
    """
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    # NOTE(review): attribute follows this file's WavaVeca naming — confirm it
    # matches the backbone attribute exposed by the model class used here.
    feature_extractor = hf_model.wavaveca.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_extractor, unused_weights, hf_model.config.feat_extract_norm == "group"
            )
            is_used = True
        else:
            is_used = load_wavaveca_layer(name, value, hf_model)
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    """Copy a conv feature-extractor weight (`type_id` 0 = conv, 2 = layer
    norm) into the matching HF conv layer, or record it as unused.

    Fixes: garbled assignment targets restored (`name`, `items`, `layer_id`,
    `type_id`, and the `.data = value` copies); name restored from the call
    site in recursively_load_weights.
    """
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
@torch.no_grad()
def convert_wavaveca_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True, is_seq_class=False
):
    """Convert a fairseq wav2vec2 checkpoint to the HF format and save it.

    Fixes: every local was assigned to `SCREAMING_SNAKE_CASE_` while the body
    referenced the real names (`config`, `hf_wavavec`, `target_dict`, ...);
    parameters restored from the call site in the __main__ block below.

    Args:
        checkpoint_path: path to the fairseq checkpoint.
        pytorch_dump_folder_path: output directory for the HF model.
        config_path: optional HF config.json to start from.
        dict_path: fairseq dictionary (CTC) or label file (seq-class).
        is_finetuned: convert as a fine-tuned CTC model.
        is_seq_class: convert as a sequence-classification model.
    """
    if config_path is not None:
        config = WavaVecaConfig.from_pretrained(config_path)
    else:
        config = WavaVecaConfig()

    if is_seq_class:
        id2label = read_txt_into_dict(dict_path)
        config.id2label = id2label
        hf_wavavec = WavaVecaForSequenceClassification(config)
        feature_extractor = WavaVecaFeatureExtractor(
            feature_size=1,
            sampling_rate=1_60_00,
            padding_value=0,
            do_normalize=True,
            return_attention_mask=True,
        )
        feature_extractor.save_pretrained(pytorch_dump_folder_path)
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            vocab_dict = target_dict.indices

            # fairseq has the <pad> and <s> switched
            vocab_dict["<pad>"] = 0
            vocab_dict["<s>"] = 1
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(vocab_dict, vocab_handle)
            tokenizer = WavaVecaCTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            return_attention_mask = True if config.feat_extract_norm == "layer" else False
            feature_extractor = WavaVecaFeatureExtractor(
                feature_size=1,
                sampling_rate=1_60_00,
                padding_value=0,
                do_normalize=True,
                return_attention_mask=return_attention_mask,
            )
            processor = WavaVecaProcessor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)
        hf_wavavec = WavaVecaForCTC(config)
    else:
        hf_wavavec = WavaVecaForPreTraining(config)

    if is_finetuned or is_seq_class:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        task_arg = argparse.Namespace(task="audio_pretraining")
        task = fairseq.tasks.setup_task(task_arg)
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path], task=task)

    model = model[0].eval()

    recursively_load_weights(model, hf_wavavec, not is_finetuned)

    hf_wavavec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    parser.add_argument(
        "--is_seq_class",
        action="store_true",
        help="Whether the model to convert is a fine-tuned sequence classification model or not",
    )
    args = parser.parse_args()

    # Fine-tuned CTC conversion is the default; the flags opt out of it.
    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wavaveca_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTImageProcessor, ViTMSNConfig, ViTMSNModel
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
# Disable autograd globally: this conversion script only copies weights and
# runs a verification forward pass, so gradient tracking is never needed.
torch.set_grad_enabled(False)
def create_rename_keys(config, base_model=False):
    """Build the (timm-style source key, HF target key) rename pairs for a
    ViT MSN checkpoint.

    Fixes: defined as `UpperCAmelCase_` but called as `create_rename_keys`;
    the accumulator was assigned to `SCREAMING_SNAKE_CASE_` while the body
    appended to `rename_keys`.

    Args:
        config: model config providing `num_hidden_layers`.
        base_model: when True, target keys are for a bare backbone (no "vit."
            prefix, layernorm instead of a classification head).
    Returns:
        List of (source, target) key pairs.
    """
    rename_keys = []
    for i in range(config.num_hidden_layers):
        # encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
        rename_keys.append((f"module.blocks.{i}.norm1.weight", f"vit.encoder.layer.{i}.layernorm_before.weight"))
        rename_keys.append((f"module.blocks.{i}.norm1.bias", f"vit.encoder.layer.{i}.layernorm_before.bias"))
        rename_keys.append(
            (f"module.blocks.{i}.attn.proj.weight", f"vit.encoder.layer.{i}.attention.output.dense.weight")
        )
        rename_keys.append((f"module.blocks.{i}.attn.proj.bias", f"vit.encoder.layer.{i}.attention.output.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.norm2.weight", f"vit.encoder.layer.{i}.layernorm_after.weight"))
        rename_keys.append((f"module.blocks.{i}.norm2.bias", f"vit.encoder.layer.{i}.layernorm_after.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.weight", f"vit.encoder.layer.{i}.intermediate.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc1.bias", f"vit.encoder.layer.{i}.intermediate.dense.bias"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.weight", f"vit.encoder.layer.{i}.output.dense.weight"))
        rename_keys.append((f"module.blocks.{i}.mlp.fc2.bias", f"vit.encoder.layer.{i}.output.dense.bias"))

    # projection layer + position embeddings
    rename_keys.extend(
        [
            ("module.cls_token", "vit.embeddings.cls_token"),
            ("module.patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight"),
            ("module.patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias"),
            ("module.pos_embed", "vit.embeddings.position_embeddings"),
        ]
    )

    if base_model:
        # layernorm + pooler
        rename_keys.extend(
            [
                ("module.norm.weight", "layernorm.weight"),
                ("module.norm.bias", "layernorm.bias"),
            ]
        )
        # if just the base model, we should remove "vit" from all keys that start with "vit"
        rename_keys = [(pair[0], pair[1][4:]) if pair[1].startswith("vit") else pair for pair in rename_keys]
    else:
        # layernorm + classification head
        rename_keys.extend(
            [
                ("norm.weight", "vit.layernorm.weight"),
                ("norm.bias", "vit.layernorm.bias"),
                ("head.weight", "classifier.weight"),
                ("head.bias", "classifier.bias"),
            ]
        )
    return rename_keys
def read_in_q_k_v(state_dict, config, base_model=False):
    """Split each fused timm qkv projection into separate HF query/key/value
    entries, mutating *state_dict* in place.

    Fixes: defined as `UpperCAmelCase_` but called as `read_in_q_k_v`; the
    per-slice assignment targets were garbled to `SCREAMING_SNAKE_CASE_`
    (the slices were computed and discarded) — restored the HF target keys.
    """
    for i in range(config.num_hidden_layers):
        if base_model:
            prefix = ""
        else:
            prefix = "vit."
        # read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
        in_proj_weight = state_dict.pop(f"module.blocks.{i}.attn.qkv.weight")
        in_proj_bias = state_dict.pop(f"module.blocks.{i}.attn.qkv.bias")
        # next, add query, keys and values (in that order) to the state dict
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.weight"] = in_proj_weight[
            : config.hidden_size, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.query.bias"] = in_proj_bias[: config.hidden_size]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.weight"] = in_proj_weight[
            config.hidden_size : config.hidden_size * 2, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.key.bias"] = in_proj_bias[
            config.hidden_size : config.hidden_size * 2
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.weight"] = in_proj_weight[
            -config.hidden_size :, :
        ]
        state_dict[f"{prefix}encoder.layer.{i}.attention.attention.value.bias"] = in_proj_bias[-config.hidden_size :]
def remove_classification_head_(state_dict):
    """Drop the classification head weights from *state_dict*, in place.

    Fix: garbled `state_dict.pop(__UpperCAmelCase, __UpperCAmelCase)` restored
    to `pop(k, None)` so missing keys are tolerated; accumulator name restored.
    """
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)
def remove_projection_head(state_dict):
    """Drop the MSN self-supervised projection-head weights, in place.

    Fix: name restored from the call site in convert_vit_msn_checkpoint;
    `pop` arguments restored to `(k, None)`.
    """
    # projection head is used in the self-supervised pre-training in MSN,
    # for downstream task it's not needed.
    ignore_keys = [
        "module.fc.fc1.weight",
        "module.fc.fc1.bias",
        "module.fc.bn1.weight",
        "module.fc.bn1.bias",
        "module.fc.bn1.running_mean",
        "module.fc.bn1.running_var",
        "module.fc.bn1.num_batches_tracked",
        "module.fc.fc2.weight",
        "module.fc.fc2.bias",
        "module.fc.bn2.weight",
        "module.fc.bn2.bias",
        "module.fc.bn2.running_mean",
        "module.fc.bn2.running_var",
        "module.fc.bn2.num_batches_tracked",
        "module.fc.fc3.weight",
        "module.fc.fc3.bias",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def rename_key(dct, old, new):
    """Move the entry at key *old* to key *new*, in place.

    Fix: name and parameters restored from the call site
    `rename_key(state_dict, src, dest)`.
    """
    val = dct.pop(old)
    dct[new] = val
def convert_vit_msn_checkpoint(checkpoint_url, pytorch_dump_folder_path):
    """Download a ViT MSN checkpoint, convert it to HF format, verify the
    forward pass against known logits, and save model + image processor.

    Fixes: defined as `UpperCAmelCase_` but called as
    `convert_vit_msn_checkpoint` by the __main__ block; every local was
    assigned to `SCREAMING_SNAKE_CASE_` while the real names were referenced.
    """
    config = ViTMSNConfig()
    config.num_labels = 10_00

    repo_id = "datasets/huggingface/label-files"
    filename = "imagenet-1k-id2label.json"
    id2label = json.load(open(hf_hub_download(repo_id, filename), "r"))
    id2label = {int(k): v for k, v in id2label.items()}
    config.id2label = id2label
    config.label2id = {v: k for k, v in id2label.items()}

    # Architecture hyper-parameters are inferred from the checkpoint name.
    if "s16" in checkpoint_url:
        config.hidden_size = 3_84
        config.intermediate_size = 15_36
        config.num_attention_heads = 6
    elif "l16" in checkpoint_url:
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1
    elif "b4" in checkpoint_url:
        config.patch_size = 4
    elif "l7" in checkpoint_url:
        config.patch_size = 7
        config.hidden_size = 10_24
        config.intermediate_size = 40_96
        config.num_hidden_layers = 24
        config.num_attention_heads = 16
        config.hidden_dropout_prob = 0.1

    model = ViTMSNModel(config)

    state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")["target_encoder"]

    image_processor = ViTImageProcessor(size=config.image_size)

    remove_projection_head(state_dict)
    rename_keys = create_rename_keys(config, base_model=True)

    for src, dest in rename_keys:
        rename_key(state_dict, src, dest)
    read_in_q_k_v(state_dict, config, base_model=True)

    model.load_state_dict(state_dict)
    model.eval()

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image = Image.open(requests.get(url, stream=True).raw)
    image_processor = ViTImageProcessor(
        size=config.image_size, image_mean=IMAGENET_DEFAULT_MEAN, image_std=IMAGENET_DEFAULT_STD
    )
    inputs = image_processor(images=image, return_tensors="pt")

    # forward pass
    torch.manual_seed(2)
    outputs = model(**inputs)
    last_hidden_state = outputs.last_hidden_state

    # The following Colab Notebook was used to generate these outputs:
    # https://colab.research.google.com/gist/sayakpaul/3672419a04f5997827503fd84079bdd1/scratchpad.ipynb
    if "s16" in checkpoint_url:
        expected_slice = torch.tensor([[-1.0_9_1_5, -1.4_8_7_6, -1.1_8_0_9]])
    elif "b16" in checkpoint_url:
        expected_slice = torch.tensor([[1_4.2_8_8_9, -1_8.9_0_4_5, 1_1.7_2_8_1]])
    elif "l16" in checkpoint_url:
        expected_slice = torch.tensor([[4_1.5_0_2_8, -2_2.8_6_8_1, 4_5.6_4_7_5]])
    elif "b4" in checkpoint_url:
        expected_slice = torch.tensor([[-4.3_8_6_8, 5.2_9_3_2, -0.4_1_3_7]])
    else:
        expected_slice = torch.tensor([[-0.1_7_9_2, -0.6_4_6_5, 2.4_2_6_3]])

    # verify logits
    assert torch.allclose(last_hidden_state[:, 0, :3], expected_slice, atol=1e-4)

    print(f"Saving model to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--checkpoint_url",
        default="https://dl.fbaipublicfiles.com/msn/vits16_800ep.pth.tar",
        type=str,
        help="URL of the checkpoint you'd like to convert.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
    )

    args = parser.parse_args()
    convert_vit_msn_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path)
"""simple docstring"""
import inspect
import unittest
from transformers import MobileViTConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTForImageClassification, MobileViTForSemanticSegmentation, MobileViTModel
from transformers.models.mobilevit.modeling_mobilevit import MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTConfigTester(ConfigTester):
    """ConfigTester specialization checking MobileViT-specific config attributes.

    Renamed to match the instantiation ``MobileViTConfigTester(...)`` in the
    test class below; the obfuscated original was named ``lowercase__``.
    """

    def create_and_test_config_common_properties(self):
        # Assert on the freshly built config instance. The previous code built
        # the config, left it unused, and ran hasattr() against the module-level
        # placeholder name instead.
        config = self.config_class(**self.inputs_dict)
        for attribute in ("hidden_sizes", "neck_hidden_sizes", "num_attention_heads"):
            self.parent.assertTrue(hasattr(config, attribute))
class MobileViTModelTester:
    """Builds a small MobileViT config plus dummy inputs and runs shape checks.

    Renamed to match the instantiation ``MobileViTModelTester(self)`` in the
    test class below. NOTE(review): reconstructed from obfuscated code whose
    ``__init__`` gave every parameter the same name (a SyntaxError); parameter
    names follow the attribute assignments in the body. The ``0.02`` default is
    assumed to be ``initializer_range`` and the two ``True`` defaults
    ``is_training``/``use_labels`` -- confirm against the upstream test file.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        last_hidden_size=640,
        num_attention_heads=4,
        hidden_act="silu",
        conv_kernel_size=3,
        output_stride=32,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        is_training=True,
        use_labels=True,
        num_labels=10,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = last_hidden_size
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Create one batch: (config, pixel_values, labels, pixel_labels)."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels

    def get_config(self):
        """Build a MobileViTConfig from the tester's hyper-parameters."""
        return MobileViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            num_attention_heads=self.num_attention_heads,
            hidden_act=self.hidden_act,
            conv_kernel_size=self.conv_kernel_size,
            output_stride=self.output_stride,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            classifier_dropout_prob=self.classifier_dropout_prob,
            initializer_range=self.initializer_range,
        )

    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        """Base model must emit a (batch, C, H//stride, W//stride) feature map."""
        model = MobileViTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        """Classification head must emit (batch, num_labels) logits."""
        config.num_labels = self.num_labels
        model = MobileViTForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        """Segmentation head must emit per-pixel logits, with or without labels."""
        config.num_labels = self.num_labels
        model = MobileViTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
# Common model tests for MobileViT (run via ModelTesterMixin/PipelineTesterMixin).
# NOTE(review): obfuscation damage -- every method is named `UpperCAmelCase__`
# (so only the last definition of each name survives class creation and none are
# unittest-discoverable), and `snake_case__` reads below stand in for several
# different upstream names (presumably ModelTesterMixin, PipelineTesterMixin,
# MobileViTConfig, config, inputs_dict, ...) -- confirm against upstream.
@require_torch
class lowercase__ ( snake_case__, snake_case__, unittest.TestCase ):
    # All torch model classes exercised by the common tests.
    _UpperCAmelCase :Union[str, Any] = (
        (MobileViTModel, MobileViTForImageClassification, MobileViTForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    # Pipeline-task -> model-class mapping used by PipelineTesterMixin.
    _UpperCAmelCase :List[Any] = (
        {
            "feature-extraction": MobileViTModel,
            "image-classification": MobileViTForImageClassification,
            "image-segmentation": MobileViTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Feature flags consumed by ModelTesterMixin (all disabled for MobileViT).
    _UpperCAmelCase :int = False
    _UpperCAmelCase :Any = False
    _UpperCAmelCase :Tuple = False
    _UpperCAmelCase :Tuple = False
    # setUp: build the model tester and the config tester.
    def UpperCAmelCase__ ( self : str ):
        lowerCamelCase_ : str =MobileViTModelTester(self )
        lowerCamelCase_ : Optional[int] =MobileViTConfigTester(self , config_class=snake_case__ , has_text_modality=snake_case__ )
    # Run the shared config sanity checks.
    def UpperCAmelCase__ ( self : Dict ):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="MobileViT does not use inputs_embeds" )
    def UpperCAmelCase__ ( self : Optional[int] ):
        pass
    @unittest.skip(reason="MobileViT does not support input and output embeddings" )
    def UpperCAmelCase__ ( self : List[Any] ):
        pass
    @unittest.skip(reason="MobileViT does not output attentions" )
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        pass
    # forward() of every model class must take `pixel_values` as first argument.
    def UpperCAmelCase__ ( self : List[str] ):
        lowerCamelCase_ , lowerCamelCase_ : Dict =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : Union[str, Any] =model_class(snake_case__ )
            lowerCamelCase_ : str =inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCamelCase_ : Union[str, Any] =[*signature.parameters.keys()]
            lowerCamelCase_ : str =["pixel_values"]
            self.assertListEqual(arg_names[:1] , snake_case__ )
    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def UpperCAmelCase__ ( self : List[Any] ):
        pass
    # Base-model shape check (delegates to the model tester).
    def UpperCAmelCase__ ( self : Tuple ):
        lowerCamelCase_ : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*snake_case__ )
    # Hidden states: 5 feature maps whose spatial dims halve at each stage.
    def UpperCAmelCase__ ( self : int ):
        def check_hidden_states_output(snake_case__ : List[Any] , snake_case__ : Any , snake_case__ : Any ):
            lowerCamelCase_ : Any =model_class(snake_case__ )
            model.to(snake_case__ )
            model.eval()
            with torch.no_grad():
                lowerCamelCase_ : Optional[Any] =model(**self._prepare_for_class(snake_case__ , snake_case__ ) )
            lowerCamelCase_ : str =outputs.hidden_states
            lowerCamelCase_ : Dict =5
            self.assertEqual(len(snake_case__ ) , snake_case__ )
            # MobileViT's feature maps are of shape (batch_size, num_channels, height, width)
            # with the width and height being successively divided by 2.
            lowerCamelCase_ : Union[str, Any] =2
            for i in range(len(snake_case__ ) ):
                self.assertListEqual(
                    list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
                divisor *= 2
            self.assertEqual(self.model_tester.output_stride , divisor // 2 )
        lowerCamelCase_ , lowerCamelCase_ : Optional[Any] =self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCamelCase_ : str =True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCamelCase_ : List[Any] =True
            check_hidden_states_output(snake_case__ , snake_case__ , snake_case__ )
    # Classification head shape check.
    def UpperCAmelCase__ ( self : Union[str, Any] ):
        lowerCamelCase_ : str =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*snake_case__ )
    # Segmentation head shape check.
    def UpperCAmelCase__ ( self : Any ):
        lowerCamelCase_ : Optional[int] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*snake_case__ )
    # Smoke-test loading the first published checkpoint (network, hence @slow).
    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ):
        for model_name in MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCamelCase_ : Union[str, Any] =MobileViTModel.from_pretrained(snake_case__ )
            self.assertIsNotNone(snake_case__ )
def _snake_case ( ):
    """Load the COCO fixture image the MobileViT integration tests run on."""
    # Bind and return the same name; the previous code assigned the opened
    # image to `lowerCamelCase_` and then returned the undefined name `image`.
    # The wrong `-> Tuple` annotation (typing is not imported here) is dropped.
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
# Integration tests running real checkpoints end-to-end (all @slow, need network).
# NOTE(review): `snake_case__` reads below are obfuscation placeholders for
# several distinct upstream names (presumably torch_device, the processed
# `inputs`, and the `expected_*` tensors) -- confirm against the upstream file.
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
    # Default processor for the xx-small checkpoint (None when vision deps are absent).
    @cached_property
    def UpperCAmelCase__ ( self : Optional[Any] ):
        return MobileViTImageProcessor.from_pretrained("apple/mobilevit-xx-small" ) if is_vision_available() else None
    # Image-classification head: logits shape (1, 1000) plus first three values.
    @slow
    def UpperCAmelCase__ ( self : int ):
        lowerCamelCase_ : Dict =MobileViTForImageClassification.from_pretrained("apple/mobilevit-xx-small" ).to(snake_case__ )
        lowerCamelCase_ : Any =self.default_image_processor
        lowerCamelCase_ : Any =prepare_img()
        lowerCamelCase_ : int =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ : Union[str, Any] =model(**snake_case__ )
        # verify the logits
        lowerCamelCase_ : int =torch.Size((1, 1000) )
        self.assertEqual(outputs.logits.shape , snake_case__ )
        lowerCamelCase_ : Optional[int] =torch.tensor([-1.9_364, -1.2_327, -0.4_653] ).to(snake_case__ )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case__ , atol=1E-4 ) )
    # Semantic-segmentation head: logits shape (1, 21, 32, 32) plus a 3x3x3 corner.
    @slow
    def UpperCAmelCase__ ( self : Dict ):
        lowerCamelCase_ : Tuple =MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowerCamelCase_ : Tuple =model.to(snake_case__ )
        lowerCamelCase_ : Any =MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowerCamelCase_ : Dict =prepare_img()
        lowerCamelCase_ : Optional[int] =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ : str =model(**snake_case__ )
        lowerCamelCase_ : List[Any] =outputs.logits
        # verify the logits
        lowerCamelCase_ : Optional[int] =torch.Size((1, 21, 32, 32) )
        self.assertEqual(logits.shape , snake_case__ )
        lowerCamelCase_ : Optional[int] =torch.tensor(
            [
                [[6.9_713, 6.9_786, 7.2_422], [7.2_893, 7.2_825, 7.4_446], [7.6_580, 7.8_797, 7.9_420]],
                [[-10.6_869, -10.3_250, -10.3_471], [-10.4_228, -9.9_868, -9.7_132], [-11.0_405, -11.0_221, -10.7_318]],
                [[-3.3_089, -2.8_539, -2.6_740], [-3.2_706, -2.5_621, -2.5_108], [-3.2_534, -2.6_615, -2.6_651]],
            ] , device=snake_case__ , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1E-4 ) )
    # post_process_semantic_segmentation: honors target_sizes and defaults to logits size.
    @slow
    def UpperCAmelCase__ ( self : Optional[Any] ):
        lowerCamelCase_ : Any =MobileViTForSemanticSegmentation.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowerCamelCase_ : Tuple =model.to(snake_case__ )
        lowerCamelCase_ : List[str] =MobileViTImageProcessor.from_pretrained("apple/deeplabv3-mobilevit-xx-small" )
        lowerCamelCase_ : Tuple =prepare_img()
        lowerCamelCase_ : List[Any] =image_processor(images=snake_case__ , return_tensors="pt" ).to(snake_case__ )
        # forward pass
        with torch.no_grad():
            lowerCamelCase_ : str =model(**snake_case__ )
        lowerCamelCase_ : int =outputs.logits.detach().cpu()
        lowerCamelCase_ : Optional[Any] =image_processor.post_process_semantic_segmentation(outputs=snake_case__ , target_sizes=[(50, 60)] )
        lowerCamelCase_ : Dict =torch.Size((50, 60) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
        lowerCamelCase_ : Optional[int] =image_processor.post_process_semantic_segmentation(outputs=snake_case__ )
        lowerCamelCase_ : Optional[Any] =torch.Size((32, 32) )
        self.assertEqual(segmentation[0].shape , snake_case__ )
| 209 |
"""simple docstring"""
from __future__ import annotations
from collections import namedtuple
def _snake_case ( lowerCamelCase__ : float , lowerCamelCase__ : float , lowerCamelCase__ : float ) -> tuple:
lowerCamelCase_ : Optional[Any] =namedtuple("result" , "name value" )
if (voltage, current, power).count(0 ) != 1:
raise ValueError("Only one argument must be 0" )
elif power < 0:
raise ValueError(
"Power cannot be negative in any electrical/electronics system" )
elif voltage == 0:
return result("voltage" , power / current )
elif current == 0:
return result("current" , power / voltage )
elif power == 0:
return result("power" , float(round(abs(voltage * current ) , 2 ) ) )
else:
raise ValueError("Exactly one argument must be 0" )
# Run the doctests embedded in this module when executed as a script.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
| 209 | 1 |
'''Lazy import structure for the MLuke tokenizer (gated on sentencepiece).'''
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available

# Submodule name -> list of public names; consumed by _LazyModule below.
# (Broken annotations referencing the unimported `Union`/`Optional` removed.)
a_ = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Register the tokenizer module only when sentencepiece is installed.
    # The previous code replaced the whole dict with a bare list here.
    a_["tokenization_mluke"] = ["MLukeTokenizer"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mluke import MLukeTokenizer
else:
    import sys

    # Install the lazy proxy in sys.modules. The previous code passed the
    # undefined name `_import_structure` (NameError at import time) and only
    # bound the proxy to a throwaway variable.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 55 |
'''simple docstring'''
import jax.numpy as jnp
from ...utils import logging
from ..ta.modeling_flax_ta import FlaxTaEncoderModel, FlaxTaForConditionalGeneration, FlaxTaModel
from .configuration_mta import MTaConfig
__lowercase : Dict = logging.get_logger(__name__)
__lowercase : str = '''T5Config'''
def lowercase_ ( input_ids , pad_token_id , decoder_start_token_id ) -> jnp.ndarray:
    """Shift ``input_ids`` one position to the right for decoder teacher forcing.

    Position 0 becomes ``decoder_start_token_id``; any label-ignore sentinel
    (-100) remaining in the shifted sequence is replaced by ``pad_token_id``.

    Fixes the obfuscated original, whose three parameters all shared the name
    ``_lowercase`` -- a SyntaxError in Python.
    """
    shifted_input_ids = jnp.zeros_like(input_ids)
    shifted_input_ids = shifted_input_ids.at[:, 1:].set(input_ids[:, :-1])
    shifted_input_ids = shifted_input_ids.at[:, 0].set(decoder_start_token_id)
    # Labels use -100 for ignored positions; the decoder input must use pad instead.
    shifted_input_ids = jnp.where(shifted_input_ids == -100, pad_token_id, shifted_input_ids)
    return shifted_input_ids
# Flax MT5 wrapper: reuses the T5 implementation with model_type "mt5" and MTaConfig.
# NOTE(review): obfuscation damage -- both attributes share the name `lowerCamelCase`,
# so the second assignment overwrites the first ("mt5" is lost); upstream these are
# `model_type` and `config_class` on `FlaxMT5Model`. Confirm before relying on this.
class __lowercase ( _lowercase ):
    lowerCamelCase : Optional[int] = "mt5"
    lowerCamelCase : Dict = MTaConfig
# Flax MT5 wrapper (second of three classes that all share the name `__lowercase`,
# so each class statement rebinds the previous one -- obfuscation damage; upstream
# this is presumably `FlaxMT5EncoderModel` with `model_type`/`config_class` attrs).
class __lowercase ( _lowercase ):
    lowerCamelCase : Tuple = "mt5"
    lowerCamelCase : int = MTaConfig
# Flax MT5 wrapper (third class sharing the name `__lowercase`; only this binding
# survives at module level -- obfuscation damage; upstream this is presumably
# `FlaxMT5ForConditionalGeneration` with `model_type`/`config_class` attrs).
class __lowercase ( _lowercase ):
    lowerCamelCase : Optional[int] = "mt5"
    lowerCamelCase : Union[str, Any] = MTaConfig
| 318 | 0 |
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class __snake_case ( unittest.TestCase ):
    """Tests for the ``transformers.utils.logging`` verbosity helpers.

    NOTE(review): the obfuscated original defined five methods all named ``__a``
    (so only the last survived class creation and none were discoverable by
    unittest); they were given ``test_*`` names reconstructed from their bodies,
    and locals were renamed to match the names the bodies already read.
    """

    def test_set_level(self):
        """Each set_verbosity_* call must be reflected by the root logger level."""
        logger = logging.get_logger()

        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()

        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel(), logging.get_verbosity())

        # restore to the original level
        logging.set_verbosity(level_origin)

    def test_integration(self):
        """Warnings are captured or silenced according to the global verbosity."""
        level_origin = logging.get_verbosity()

        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger) as cl:
                logger.warning(msg)
            self.assertEqual(cl.out, msg + "\n")

        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()

        # should not be able to log warnings
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, "")

        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger) as cl:
            logger.warning(msg)
        self.assertEqual(cl.out, msg + "\n")

        # restore to the original level
        logging.set_verbosity(level_origin)

    @mockenv(TRANSFORMERS_VERBOSITY="error")
    def test_env_override(self):
        """TRANSFORMERS_VERBOSITY must drive the library verbosity."""
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")

        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY", None)
        env_level = logging.log_levels[env_level_str]

        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level,
            current_level,
            f"TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}",
        )

        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()

    @mockenv(TRANSFORMERS_VERBOSITY="super-error")
    def test_env_invalid_override(self):
        """An unknown TRANSFORMERS_VERBOSITY value must log a warning."""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart")
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error", cl.out)

        # no need to restore as nothing was changed

    def test_advisory_warnings(self):
        """warning_advice is a no-op while TRANSFORMERS_NO_ADVISORY_WARNINGS is set."""
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart")
        msg = "Testing 1, 2, 3"

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1"):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, "")

        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS=""):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger) as cl:
                logger.warning_advice(msg)
            self.assertEqual(cl.out, msg + "\n")
def __SCREAMING_SNAKE_CASE ( ) -> None:
    """Toggle huggingface_hub progress bars off and back on, asserting the
    reported state after each switch. (Return annotation corrected from
    ``int``: nothing is returned.)"""
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
| 204 | def __SCREAMING_SNAKE_CASE ( __UpperCamelCase : Union[str, Any] ) -> int:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ = len(__UpperCamelCase )
for i in range(length - 1 ):
SCREAMING_SNAKE_CASE__ = i
for k in range(i + 1 , __UpperCamelCase ):
if collection[k] < collection[least]:
SCREAMING_SNAKE_CASE__ = k
if least != i:
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ = (collection[i], collection[least])
return collection
if __name__ == "__main__":
__lowerCamelCase : Any = input('''Enter numbers separated by a comma:\n''').strip()
__lowerCamelCase : Dict = [int(item) for item in user_input.split(''',''')]
print(selection_sort(unsorted))
| 204 | 1 |
'''simple docstring'''
import argparse
import dataclasses
import json
import logging
import os
import shutil
from typing import List, Optional
import datasets
from accelerate import Accelerator
from datasets import load_dataset
from finetuning import finetune
from tqdm.auto import tqdm
import transformers
from transformers import AutoConfig, set_seed
from transformers.trainer_utils import IntervalStrategy
__lowercase : List[Any] = logging.getLogger(__name__)
__lowercase : List[Any] = '''pytorch_model.bin'''
@dataclasses.dataclass
class STModelArguments:
    """Arguments describing the base model used for self-training.

    Renamed to match the instantiation ``STModelArguments(model_name_or_path=...)``
    later in this file; field names are taken from that call site and from the
    ``args.*`` reads in the training loop. The previous code used one shared
    field name and an undefined default (``_lowercase``).
    """

    model_name_or_path: str = dataclasses.field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models."}
    )
    cache_dir: Optional[str] = dataclasses.field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co."},
    )
@dataclasses.dataclass
class STDataArguments:
    """Arguments describing the training/inference data files for self-training.

    Renamed to match the instantiation ``STDataArguments(train_file=..., infer_file=...)``
    later in this file; field names are taken from that call site and the
    ``args.*`` reads in the training loop. The previous code used one shared
    field name and an undefined default (``_lowercase``).
    """

    train_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the training data."})
    infer_file: str = dataclasses.field(metadata={"help": "A csv or a json file containing the data to predict on."})
    eval_file: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    task_name: Optional[str] = dataclasses.field(
        default=None, metadata={"help": "The name of the task to train on."}
    )
    label_list: Optional[List[str]] = dataclasses.field(
        default=None, metadata={"help": "The list of labels for the task."}
    )
@dataclasses.dataclass
class STTrainingArguments:
    """Control arguments for the self-training loop.

    Renamed to match the instantiation ``STTrainingArguments(output_dir=...)``
    later in this file; field names are taken from the ``args.*`` reads in the
    training loop (e.g. ``args.do_filter_by_confidence``). The previous code
    used one shared field name and undefined boolean defaults (``_lowercase``,
    restored to False / None as the only sane interpretation -- confirm against
    the upstream self-training example).
    """

    output_dir: str = dataclasses.field(
        metadata={"help": "The output directory where the model predictions and checkpoints will be written."}
    )
    eval_metric: Optional[str] = dataclasses.field(
        default="accuracy", metadata={"help": "The evaluation metric used for the task."}
    )
    evaluation_strategy: Optional[str] = dataclasses.field(
        default="no",
        metadata={
            "help": "The evaluation strategy to adopt during training. Possible values are: [\"no\", \"step\", \"epoch]"
        },
    )
    early_stopping_patience: Optional[int] = dataclasses.field(
        default=10,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    early_stopping_threshold: Optional[float] = dataclasses.field(
        default=0.0,
        metadata={
            "help": "How much the specified evaluation metric must improve to satisfy early stopping conditions."
        },
    )
    do_filter_by_confidence: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the confidence score."},
    )
    do_filter_by_val_performance: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to filter the pseudo-labeled data based on the validation performance."},
    )
    finetune_on_labeled_data: Optional[bool] = dataclasses.field(
        default=False,
        metadata={"help": "Whether to fine-tune on labeled data after pseudo training."},
    )
    confidence_threshold: Optional[float] = dataclasses.field(
        default=0.0, metadata={"help": "Confidence threshold for pseudo-labeled data filtering."}
    )
    max_selftrain_iterations: Optional[int] = dataclasses.field(
        default=100,
        metadata={"help": "Number of evaluation calls with no improvement after which training will be stopped."},
    )
    seed: Optional[int] = dataclasses.field(
        default=None, metadata={"help": "Random seed for initialization."}
    )
def create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir) -> None:
    """Join model predictions with the inference inputs and write them out as
    the next iteration's pseudo-labeled training file.

    Renamed to match the call ``create_pseudo_labeled_data(...)`` in the
    self-training loop below; the obfuscated original gave all six parameters
    the same name (a SyntaxError). Parameter and local names were recovered
    from the reads still present in the body (``args``, ``example``,
    ``idalabel`` -> ``id2label``, ...).
    """
    # Column-wise join of the raw inference rows with the predicted labels.
    dataset = datasets.concatenate_datasets([infer_input, infer_output], axis=1)

    if args.do_filter_by_confidence:
        dataset = dataset.filter(lambda example: example["probability"] > args.confidence_threshold)

    if args.do_filter_by_val_performance:
        assert eval_result >= 0.0 and eval_result <= 1.0
        # Keep the most confident fraction of rows, proportional to eval quality.
        num_selected_rows = int(eval_result * len(dataset))
        print(num_selected_rows)
        dataset = dataset.sort("probability", reverse=True)
        dataset = dataset.select(range(num_selected_rows))

    dataset = dataset.remove_columns(["label", "probability"])
    dataset = dataset.rename_column("prediction", "label")
    # Map integer class ids back to their string labels.
    dataset = dataset.map(lambda example: {"label": id2label[example["label"]]})
    dataset = dataset.shuffle(seed=args.seed)

    pseudo_labeled_data_file = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")
    if args.data_file_extension == "csv":
        dataset.to_csv(pseudo_labeled_data_file, index=False)
    else:
        dataset.to_json(pseudo_labeled_data_file)
def lowercase_ ( model_name_or_path , train_file , infer_file , output_dir , **kwargs ) -> None:
    """Run the iterative self-training loop: fine-tune, pseudo-label the
    inference data, optionally re-finetune on labeled data, and early-stop on
    the evaluation metric.

    NOTE(review): reconstructed from obfuscated code whose parameters all
    shared the name ``_lowercase`` (a SyntaxError) while the body read
    ``kwargs``. Parameter names were recovered from the keyword arguments fed
    to STModelArguments/STDataArguments/STTrainingArguments and from the
    identifiers the body still read; confirm against the upstream
    self-training example before relying on exact behavior.
    """
    accelerator = Accelerator()
    # Make one log on every process with the configuration for debugging.
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        level=logging.INFO,
    )
    logger.info(accelerator.state)

    # Setup logging, we only want one process per machine to log things on the
    # screen. accelerator.is_local_main_process is only True for one process per
    # machine.
    logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)

    if accelerator.is_local_main_process:
        datasets.utils.logging.set_verbosity_warning()
        transformers.utils.logging.set_verbosity_info()
    else:
        datasets.utils.logging.set_verbosity_error()
        transformers.utils.logging.set_verbosity_error()

    model_args = STModelArguments(model_name_or_path=model_name_or_path)
    data_args = STDataArguments(train_file=train_file, infer_file=infer_file)
    training_args = STTrainingArguments(output_dir=output_dir)
    args = argparse.Namespace()

    # Flatten all argument dataclasses into a single namespace.
    for arg_class in (model_args, data_args, training_args):
        for key, value in vars(arg_class).items():
            setattr(args, key, value)

    # Apply keyword overrides, but only for known argument names.
    for key, value in kwargs.items():
        if hasattr(args, key):
            setattr(args, key, value)

    # Sanity checks
    data_files = {}
    args.data_file_extension = None

    # You need to provide the training data and the data to predict on
    assert args.train_file is not None
    assert args.infer_file is not None
    data_files["train"] = args.train_file
    data_files["infer"] = args.infer_file

    if args.evaluation_strategy != IntervalStrategy.NO.value:
        assert args.eval_file is not None
        data_files["eval"] = args.eval_file

    for key in data_files:
        extension = data_files[key].split(".")[-1]
        assert extension in ["csv", "json"], f"`{key}_file` should be a csv or a json file."
        if args.data_file_extension is None:
            args.data_file_extension = extension
        else:
            assert extension == args.data_file_extension, f"`{key}_file` should be a {args.data_file_extension} file`."

    assert (
        args.eval_metric in datasets.list_metrics()
    ), f"{args.eval_metric} not in the list of supported metrics {datasets.list_metrics()}."

    # If passed along, set the training seed now.
    if args.seed is not None:
        set_seed(args.seed)

    logger.info("Creating the initial data directory for self-training...")
    data_dir_format = f"{args.output_dir}/self-train_iter-{{}}".format
    initial_data_dir = data_dir_format(0)

    if accelerator.is_main_process:
        if args.output_dir is not None:
            os.makedirs(args.output_dir, exist_ok=True)
            os.makedirs(initial_data_dir, exist_ok=True)
    accelerator.wait_for_everyone()

    best_iteration = None
    best_eval_result = None
    early_stopping_patience_counter = 0
    should_training_stop = False
    # Show the progress bar
    progress_bar = tqdm(range(args.max_selftrain_iterations), disable=not accelerator.is_local_main_process)

    # Self-train
    for iteration in range(0, int(args.max_selftrain_iterations)):
        current_data_dir = data_dir_format(iteration)
        assert os.path.exists(current_data_dir)

        # Stage 1: initial fine-tuning for iteration = 0 or pseudo-training for
        # iteration > 0
        current_output_dir = os.path.join(current_data_dir, "stage-1")
        arguments_dict = {
            "accelerator": accelerator,
            "model_name_or_path": args.model_name_or_path,
            "cache_dir": args.cache_dir,
            "do_train": True,
            "train_file": data_files["train"] if iteration == 0 else data_files["train_pseudo"],
            "do_eval": True if args.eval_file is not None else False,
            "eval_file": data_files["eval"],
            "do_predict": True,
            "infer_file": data_files["infer"],
            "task_name": args.task_name,
            "label_list": args.label_list,
            "output_dir": current_output_dir,
            "eval_metric": args.eval_metric,
            "evaluation_strategy": args.evaluation_strategy,
            "early_stopping_patience": args.early_stopping_patience,
            "early_stopping_threshold": args.early_stopping_threshold,
            "seed": args.seed,
        }
        # Add additional training arguments
        for key, value in kwargs.items():
            if key not in arguments_dict and not hasattr(training_args, key):
                arguments_dict.update({key: value})

        # `__lowercase` is the module-level "pytorch_model.bin" constant.
        model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", __lowercase)
        if os.path.exists(model_bin_file_path):
            logger.info(
                "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 1.",
                model_bin_file_path,
                iteration,
            )
        else:
            logger.info("***** Running self-training: iteration: %d, stage: 1 *****", iteration)
            finetune(**arguments_dict)
            accelerator.wait_for_everyone()
            assert os.path.exists(model_bin_file_path)
            logger.info("Self-training job completed: iteration: %d, stage: 1.", iteration)

        if iteration > 0 and args.finetune_on_labeled_data:
            # Stage 2 (optional): fine-tuning on the original labeled data
            model_path = os.path.join(current_output_dir, "best-checkpoint")
            current_output_dir = os.path.join(current_data_dir, "stage-2")
            # Update arguments_dict
            arguments_dict["model_name_or_path"] = model_path
            arguments_dict["train_file"] = data_files["train"]
            arguments_dict["output_dir"] = current_output_dir

            model_bin_file_path = os.path.join(current_output_dir, "best-checkpoint", __lowercase)
            if os.path.exists(model_bin_file_path):
                logger.info(
                    "Found existing model checkpoint at %s. Skipping self-training: iteration: %d, stage: 2.",
                    model_bin_file_path,
                    iteration,
                )
            else:
                logger.info("***** Running self-training: iteration: %d, stage: 2 *****", iteration)
                finetune(**arguments_dict)
                accelerator.wait_for_everyone()
                assert os.path.exists(model_bin_file_path)
                logger.info("Self-training job completed: iteration: %d, stage: 2.", iteration)

        new_iteration = iteration
        next_data_dir = data_dir_format(iteration + 1)

        config = AutoConfig.from_pretrained(os.path.join(current_output_dir, "best-checkpoint"))
        id2label = config.id2label
        eval_results_file = os.path.join(current_output_dir, "eval_results_best-checkpoint.json")
        test_results_file = os.path.join(current_output_dir, "test_results_best-checkpoint.json")
        assert os.path.exists(eval_results_file)

        with open(eval_results_file, "r") as f:
            eval_result = float(json.load(f)[args.eval_metric])
        infer_output_file = os.path.join(current_output_dir, "infer_output_best-checkpoint.csv")
        assert os.path.exists(infer_output_file)

        # Loading the dataset from local csv or json files.
        infer_input = load_dataset(args.data_file_extension, data_files={"data": data_files["infer"]})["data"]
        infer_output = load_dataset("csv", data_files={"data": infer_output_file})["data"]

        if accelerator.is_main_process:
            os.makedirs(next_data_dir, exist_ok=True)
            shutil.copy(eval_results_file, os.path.join(output_dir, f"eval_results_iter-{iteration}.json"))
            if os.path.exists(test_results_file):
                shutil.copy(test_results_file, os.path.join(output_dir, f"test_results_iter-{iteration}.json"))
            create_pseudo_labeled_data(args, infer_input, infer_output, eval_result, id2label, next_data_dir)
        accelerator.wait_for_everyone()

        data_files["train_pseudo"] = os.path.join(next_data_dir, f"train_pseudo.{args.data_file_extension}")

        if args.evaluation_strategy != IntervalStrategy.NO.value:
            new_eval_result = eval_result

            if best_iteration is None:
                best_iteration = new_iteration
                best_eval_result = new_eval_result
            else:
                if new_eval_result - best_eval_result > args.early_stopping_threshold:
                    best_iteration = new_iteration
                    best_eval_result = new_eval_result
                    early_stopping_patience_counter = 0
                else:
                    if new_eval_result == best_eval_result:
                        best_iteration = new_iteration
                        best_eval_result = new_eval_result
                    early_stopping_patience_counter += 1

                if early_stopping_patience_counter >= args.early_stopping_patience:
                    should_training_stop = True

        progress_bar.update(1)

        if should_training_stop:
            break

    if best_iteration is not None:
        # Save the best iteration
        logger.info("Best iteration: %d", best_iteration)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, best_eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{iteration}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
    else:
        # Assume that the last iteration is the best
        logger.info("Best iteration: %d", args.max_selftrain_iterations - 1)
        logger.info("Best evaluation result: %s = %f", args.eval_metric, eval_result)
        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            shutil.copy(
                os.path.join(output_dir, f"eval_results_iter-{args.max_selftrain_iterations - 1}.json"),
                os.path.join(output_dir, "eval_results_best-iteration.json"),
            )
| 318 |
'''simple docstring'''
import importlib
import json
import os
from collections import OrderedDict
from typing import Dict, Optional, Union
# Build the list of all feature extractors
from ...configuration_utils import PretrainedConfig
from ...dynamic_module_utils import get_class_from_dynamic_module, resolve_trust_remote_code
from ...feature_extraction_utils import FeatureExtractionMixin
from ...utils import CONFIG_NAME, FEATURE_EXTRACTOR_NAME, get_file_from_repo, logging
from .auto_factory import _LazyAutoMapping
from .configuration_auto import (
CONFIG_MAPPING_NAMES,
AutoConfig,
model_type_to_module_name,
replace_list_option_in_docstrings,
)
logger = logging.get_logger(__name__)

# Maps each model type to the name of its feature extractor class. Both the
# helper `feature_extractor_class_from_name` and `AutoFeatureExtractor` below
# read this table, so it must be bound to this exact name.
FEATURE_EXTRACTOR_MAPPING_NAMES = OrderedDict(
    [
        ("audio-spectrogram-transformer", "ASTFeatureExtractor"),
        ("beit", "BeitFeatureExtractor"),
        ("chinese_clip", "ChineseCLIPFeatureExtractor"),
        ("clap", "ClapFeatureExtractor"),
        ("clip", "CLIPFeatureExtractor"),
        ("clipseg", "ViTFeatureExtractor"),
        ("conditional_detr", "ConditionalDetrFeatureExtractor"),
        ("convnext", "ConvNextFeatureExtractor"),
        ("cvt", "ConvNextFeatureExtractor"),
        ("data2vec-audio", "Wav2Vec2FeatureExtractor"),
        ("data2vec-vision", "BeitFeatureExtractor"),
        ("deformable_detr", "DeformableDetrFeatureExtractor"),
        ("deit", "DeiTFeatureExtractor"),
        ("detr", "DetrFeatureExtractor"),
        ("dinat", "ViTFeatureExtractor"),
        ("donut-swin", "DonutFeatureExtractor"),
        ("dpt", "DPTFeatureExtractor"),
        ("encodec", "EncodecFeatureExtractor"),
        ("flava", "FlavaFeatureExtractor"),
        ("glpn", "GLPNFeatureExtractor"),
        ("groupvit", "CLIPFeatureExtractor"),
        ("hubert", "Wav2Vec2FeatureExtractor"),
        ("imagegpt", "ImageGPTFeatureExtractor"),
        ("layoutlmv2", "LayoutLMv2FeatureExtractor"),
        ("layoutlmv3", "LayoutLMv3FeatureExtractor"),
        ("levit", "LevitFeatureExtractor"),
        ("maskformer", "MaskFormerFeatureExtractor"),
        ("mctct", "MCTCTFeatureExtractor"),
        ("mobilenet_v1", "MobileNetV1FeatureExtractor"),
        ("mobilenet_v2", "MobileNetV2FeatureExtractor"),
        ("mobilevit", "MobileViTFeatureExtractor"),
        ("nat", "ViTFeatureExtractor"),
        ("owlvit", "OwlViTFeatureExtractor"),
        ("perceiver", "PerceiverFeatureExtractor"),
        ("poolformer", "PoolFormerFeatureExtractor"),
        ("regnet", "ConvNextFeatureExtractor"),
        ("resnet", "ConvNextFeatureExtractor"),
        ("segformer", "SegformerFeatureExtractor"),
        ("sew", "Wav2Vec2FeatureExtractor"),
        ("sew-d", "Wav2Vec2FeatureExtractor"),
        ("speech_to_text", "Speech2TextFeatureExtractor"),
        ("speecht5", "SpeechT5FeatureExtractor"),
        ("swiftformer", "ViTFeatureExtractor"),
        ("swin", "ViTFeatureExtractor"),
        ("swinv2", "ViTFeatureExtractor"),
        ("table-transformer", "DetrFeatureExtractor"),
        ("timesformer", "VideoMAEFeatureExtractor"),
        ("tvlt", "TvltFeatureExtractor"),
        ("unispeech", "Wav2Vec2FeatureExtractor"),
        ("unispeech-sat", "Wav2Vec2FeatureExtractor"),
        ("van", "ConvNextFeatureExtractor"),
        ("videomae", "VideoMAEFeatureExtractor"),
        ("vilt", "ViltFeatureExtractor"),
        ("vit", "ViTFeatureExtractor"),
        ("vit_mae", "ViTFeatureExtractor"),
        ("vit_msn", "ViTFeatureExtractor"),
        ("wav2vec2", "Wav2Vec2FeatureExtractor"),
        ("wav2vec2-conformer", "Wav2Vec2FeatureExtractor"),
        ("wavlm", "Wav2Vec2FeatureExtractor"),
        ("whisper", "WhisperFeatureExtractor"),
        ("xclip", "CLIPFeatureExtractor"),
        ("yolos", "YolosFeatureExtractor"),
    ]
)

# Lazy mapping from config classes to feature extractor classes.
FEATURE_EXTRACTOR_MAPPING = _LazyAutoMapping(CONFIG_MAPPING_NAMES, FEATURE_EXTRACTOR_MAPPING_NAMES)
def feature_extractor_class_from_name(class_name):
    """Resolve a feature extractor class object from its class name.

    Resolution order: the static model-type mapping, then dynamically
    registered extractors, then the top-level ``transformers`` module (which
    exposes dummy objects carrying a helpful error when a dependency is
    missing).  Returns the class, or ``None`` if nothing matches.
    """
    for module_name, extractors in FEATURE_EXTRACTOR_MAPPING_NAMES.items():
        if class_name in extractors:
            module_name = model_type_to_module_name(module_name)

            module = importlib.import_module(f".{module_name}", "transformers.models")
            try:
                return getattr(module, class_name)
            except AttributeError:
                # The class may live in another module sharing the name; keep looking.
                continue

    for _, extractor in FEATURE_EXTRACTOR_MAPPING._extra_content.items():
        if getattr(extractor, "__name__", None) == class_name:
            return extractor

    # We did not find the class, but maybe it's because a dep is missing. In that case, the class will be in the main
    # init and we return the proper dummy to get an appropriate error message.
    main_module = importlib.import_module("transformers")
    if hasattr(main_module, class_name):
        return getattr(main_module, class_name)

    return None
def get_feature_extractor_config(
    pretrained_model_name_or_path,
    cache_dir=None,
    force_download=False,
    resume_download=False,
    proxies=None,
    use_auth_token=None,
    revision=None,
    local_files_only=False,
    **kwargs,
):
    """Load the feature extractor configuration dictionary for a checkpoint.

    Fetches ``FEATURE_EXTRACTOR_NAME`` from a local folder or the Hub via
    `get_file_from_repo` and parses it as JSON.  Returns an empty dict when
    no such file exists, in which case callers fall back to the model config.
    """
    resolved_config_file = get_file_from_repo(
        pretrained_model_name_or_path,
        FEATURE_EXTRACTOR_NAME,
        cache_dir=cache_dir,
        force_download=force_download,
        resume_download=resume_download,
        proxies=proxies,
        use_auth_token=use_auth_token,
        revision=revision,
        local_files_only=local_files_only,
    )
    if resolved_config_file is None:
        logger.info(
            "Could not locate the feature extractor configuration file, will try to use the model config instead."
        )
        return {}

    with open(resolved_config_file, encoding="utf-8") as reader:
        return json.load(reader)
class AutoFeatureExtractor:
    r"""
    Generic feature extractor class, instantiated through
    ``AutoFeatureExtractor.from_pretrained``.  This class cannot be
    instantiated directly.
    """

    def __init__(self):
        raise EnvironmentError(
            "AutoFeatureExtractor is designed to be instantiated "
            "using the `AutoFeatureExtractor.from_pretrained(pretrained_model_name_or_path)` method."
        )

    @classmethod
    @replace_list_option_in_docstrings(FEATURE_EXTRACTOR_MAPPING_NAMES)
    def from_pretrained(cls, pretrained_model_name_or_path, **kwargs):
        """Instantiate the feature extractor class matching a checkpoint.

        Resolution order: `feature_extractor_type` in the feature extractor
        config, then the model config (optionally trusting remote code), and
        finally the static config-class -> extractor-class mapping.
        """
        config = kwargs.pop("config", None)
        trust_remote_code = kwargs.pop("trust_remote_code", None)
        kwargs["_from_auto"] = True

        config_dict, _ = FeatureExtractionMixin.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
        feature_extractor_class = config_dict.get("feature_extractor_type", None)
        feature_extractor_auto_map = None
        if "AutoFeatureExtractor" in config_dict.get("auto_map", {}):
            feature_extractor_auto_map = config_dict["auto_map"]["AutoFeatureExtractor"]

        # If we don't find the feature extractor class in the feature extractor config, let's try the model config.
        if feature_extractor_class is None and feature_extractor_auto_map is None:
            if not isinstance(config, PretrainedConfig):
                config = AutoConfig.from_pretrained(pretrained_model_name_or_path, **kwargs)
            # It could be in `config.feature_extractor_type``
            feature_extractor_class = getattr(config, "feature_extractor_type", None)
            if hasattr(config, "auto_map") and "AutoFeatureExtractor" in config.auto_map:
                feature_extractor_auto_map = config.auto_map["AutoFeatureExtractor"]

        if feature_extractor_class is not None:
            feature_extractor_class = feature_extractor_class_from_name(feature_extractor_class)

        has_remote_code = feature_extractor_auto_map is not None
        has_local_code = feature_extractor_class is not None or type(config) in FEATURE_EXTRACTOR_MAPPING
        trust_remote_code = resolve_trust_remote_code(
            trust_remote_code, pretrained_model_name_or_path, has_local_code, has_remote_code
        )

        if has_remote_code and trust_remote_code:
            feature_extractor_class = get_class_from_dynamic_module(
                feature_extractor_auto_map, pretrained_model_name_or_path, **kwargs
            )
            _ = kwargs.pop("code_revision", None)
            if os.path.isdir(pretrained_model_name_or_path):
                feature_extractor_class.register_for_auto_class()
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        elif feature_extractor_class is not None:
            return feature_extractor_class.from_dict(config_dict, **kwargs)
        # Last try: we use the FEATURE_EXTRACTOR_MAPPING.
        elif type(config) in FEATURE_EXTRACTOR_MAPPING:
            feature_extractor_class = FEATURE_EXTRACTOR_MAPPING[type(config)]
            return feature_extractor_class.from_dict(config_dict, **kwargs)

        raise ValueError(
            f"Unrecognized feature extractor in {pretrained_model_name_or_path}. Should have a "
            f"`feature_extractor_type` key in its {FEATURE_EXTRACTOR_NAME} of {CONFIG_NAME}, or one of the following "
            f"`model_type` keys in its {CONFIG_NAME}: {', '.join(c for c in FEATURE_EXTRACTOR_MAPPING_NAMES.keys())}"
        )

    @staticmethod
    def register(config_class, feature_extractor_class):
        """Register a new feature extractor class for a given config class."""
        FEATURE_EXTRACTOR_MAPPING.register(config_class, feature_extractor_class)
| 318 | 1 |
"""simple docstring"""
from argparse import ArgumentParser
from .add_new_model import AddNewModelCommand
from .add_new_model_like import AddNewModelLikeCommand
from .convert import ConvertCommand
from .download import DownloadCommand
from .env import EnvironmentCommand
from .lfs import LfsCommands
from .pt_to_tf import PTtoTFCommand
from .run import RunCommand
from .serving import ServeCommand
from .user import UserCommands
def main():
    """Entry point of the `transformers-cli` tool.

    Registers all sub-commands on an argument parser, parses the command
    line, builds the selected command and runs it.
    """
    parser = ArgumentParser("Transformers CLI tool", usage="transformers-cli <command> [<args>]")
    commands_parser = parser.add_subparsers(help="transformers-cli command helpers")

    # Register commands
    ConvertCommand.register_subcommand(commands_parser)
    DownloadCommand.register_subcommand(commands_parser)
    EnvironmentCommand.register_subcommand(commands_parser)
    RunCommand.register_subcommand(commands_parser)
    ServeCommand.register_subcommand(commands_parser)
    UserCommands.register_subcommand(commands_parser)
    AddNewModelCommand.register_subcommand(commands_parser)
    AddNewModelLikeCommand.register_subcommand(commands_parser)
    LfsCommands.register_subcommand(commands_parser)
    PTtoTFCommand.register_subcommand(commands_parser)

    # Let's go
    args = parser.parse_args()

    # No sub-command selected: show help and exit with an error status.
    if not hasattr(args, "func"):
        parser.print_help()
        exit(1)

    # Run
    service = args.func(args)
    service.run()


if __name__ == "__main__":
    main()
| 371 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL for the canonical MobileNetV1 checkpoints.
MOBILENET_V1_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v1_1.0_224": "https://huggingface.co/google/mobilenet_v1_1.0_224/resolve/main/config.json",
    "google/mobilenet_v1_0.75_192": "https://huggingface.co/google/mobilenet_v1_0.75_192/resolve/main/config.json",
    # See all MobileNetV1 models at https://huggingface.co/models?filter=mobilenet_v1
}
class MobileNetV1Config(PretrainedConfig):
    r"""
    Configuration class storing the hyper-parameters of a MobileNetV1 model.

    Raises:
        ValueError: if `depth_multiplier` is not strictly positive.
    """

    model_type = "mobilenet_v1"

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        min_depth=8,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.999,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError("depth_multiplier must be greater than zero.")

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.min_depth = min_depth
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
class MobileNetV1OnnxConfig(OnnxConfig):
    r"""ONNX export configuration for MobileNetV1."""

    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        # Only the batch axis is dynamic.
        return OrderedDict([("pixel_values", {0: "batch"})])

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "image-classification":
            return OrderedDict([("logits", {0: "batch"})])
        else:
            return OrderedDict([("last_hidden_state", {0: "batch"}), ("pooler_output", {0: "batch"})])

    @property
    def atol_for_validation(self) -> float:
        # Absolute tolerance used when validating exported outputs.
        return 1e-4
| 233 | 0 |
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    """Tokenizer tests for ProphetNet (BERT-style WordPiece vocabulary)."""

    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "[UNK]",
            "[CLS]",
            "[SEP]",
            "[PAD]",
            "[MASK]",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        # Write a tiny vocabulary file the tokenizer under test can load.
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])

    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        # strip_accents defaults to stripping when lowercasing is enabled.
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU?  "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how  \n Are yoU?  "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how  \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])

    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]

        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")

        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])

    @require_torch
    def test_prophetnet_tokenizer_batch_encoding(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 2_0423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)

        result = list(batch.input_ids.numpy()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)

    def test_is_whitespace(self):
        self.assertTrue(_is_whitespace(" "))
        self.assertTrue(_is_whitespace("\t"))
        self.assertTrue(_is_whitespace("\r"))
        self.assertTrue(_is_whitespace("\n"))
        self.assertTrue(_is_whitespace("\u00A0"))

        self.assertFalse(_is_whitespace("A"))
        self.assertFalse(_is_whitespace("-"))

    def test_is_control(self):
        self.assertTrue(_is_control("\u0005"))

        self.assertFalse(_is_control("A"))
        self.assertFalse(_is_control(" "))
        self.assertFalse(_is_control("\t"))
        self.assertFalse(_is_control("\r"))

    def test_is_punctuation(self):
        self.assertTrue(_is_punctuation("-"))
        self.assertTrue(_is_punctuation("$"))
        self.assertTrue(_is_punctuation("`"))
        self.assertTrue(_is_punctuation("."))

        self.assertFalse(_is_punctuation("A"))
        self.assertFalse(_is_punctuation(" "))

    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")

        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_2 = tokenizer.encode("multi-sequence build", add_special_tokens=False)

        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_2)

        # ProphetNet appends the [SEP] token (id 102) after each sequence.
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_2 + [102]
| 214 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Checkpoint name -> config URL for the canonical Switch Transformers checkpoint.
SWITCH_TRANSFORMERS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/switch-base-8": "https://huggingface.co/google/switch-base-8/blob/main/config.json",
}
class SwitchTransformersConfig(PretrainedConfig):
    r"""
    Configuration class storing the hyper-parameters of a Switch Transformers
    (sparse mixture-of-experts T5-style) model.

    Raises:
        ValueError: if `router_dtype` is not one of float32/float16/bfloat16,
            or if `feed_forward_proj` is not `{ACT_FN}` or `gated-{ACT_FN}`.
    """

    model_type = "switch_transformers"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=32128,
        d_model=768,
        d_kv=64,
        d_ff=2048,
        expert_capacity=64,
        num_layers=12,
        num_sparse_encoder_layers=3,
        num_decoder_layers=12,
        num_sparse_decoder_layers=3,
        num_heads=12,
        num_experts=8,
        router_bias=False,
        router_jitter_noise=0.01,
        router_dtype="float32",
        router_ignore_padding_tokens=False,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        router_z_loss_coef=0.001,
        router_aux_loss_coef=0.001,
        initializer_factor=1.0,
        feed_forward_proj="relu",
        is_encoder_decoder=True,
        add_router_probs=False,
        use_cache=True,
        pad_token_id=0,
        eos_token_id=1,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff

        self.num_sparse_encoder_layers = num_sparse_encoder_layers

        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_sparse_decoder_layers = num_sparse_decoder_layers

        # This tells us, each how many encoder layer we'll have to set a sparse layer.
        if self.num_sparse_encoder_layers > 0:
            self.encoder_sparse_step = self.num_layers // self.num_sparse_encoder_layers
        else:
            self.encoder_sparse_step = self.num_layers  # HACK: this will create 0 sparse layers

        # This tells us, each how many decoder layer we'll have to set a sparse layer.
        if self.num_sparse_decoder_layers > 0:
            self.decoder_sparse_step = self.num_decoder_layers // self.num_sparse_decoder_layers
        else:
            self.decoder_sparse_step = self.num_decoder_layers  # HACK: this will create 0 sparse layers

        self.num_heads = num_heads
        self.num_experts = num_experts
        self.expert_capacity = expert_capacity
        self.router_bias = router_bias
        self.router_jitter_noise = router_jitter_noise
        if router_dtype not in ["float32", "float16", "bfloat16"]:
            raise ValueError(f"`router_dtype` must be one of 'float32', 'float16' or 'bfloat16', got {router_dtype}")
        self.router_dtype = router_dtype

        self.router_ignore_padding_tokens = router_ignore_padding_tokens
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance

        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache
        self.add_router_probs = add_router_probs

        self.router_z_loss_coef = router_z_loss_coef
        self.router_aux_loss_coef = router_aux_loss_coef

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        # for backwards compatibility
        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            is_encoder_decoder=is_encoder_decoder,
            **kwargs,
        )
| 214 | 1 |
"""simple docstring"""
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
# and perform gradient accumulation
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Batch sizes used by the upstream Accelerate example scripts.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator, batch_size=16):
    """Build the GLUE MRPC train/eval dataloaders tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` driving this run (used for
            `main_process_first` and to pick padding behaviour).
        batch_size: per-device training batch size.

    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased")
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function,
            batched=True,
            remove_columns=["idx", "sentence1", "sentence2"],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples,
            padding="longest",
            max_length=max_length,
            pad_to_multiple_of=pad_to_multiple_of,
            return_tensors="pt",
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    # Evaluation uses a fixed batch size of 32 (EVAL_BATCH_SIZE in the upstream example).
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=32
    )

    return train_dataloader, eval_dataloader
# For testing only: swap in lightweight mocked dataloaders when requested by the env.
if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
    from accelerate.test_utils.training import mocked_dataloaders

    get_dataloaders = mocked_dataloaders  # noqa: F811
def training_function(config, args):
    """Train/evaluate BERT on GLUE MRPC with gradient accumulation via Accelerate.

    Args:
        config: dict with "lr", "num_epochs", "seed" and "batch_size".
        args: parsed CLI namespace (mixed_precision, gradient_accumulation_steps, cpu).
    """
    # For testing only
    if os.environ.get("TESTING_MOCKED_DATALOADERS", None) == "1":
        config["num_epochs"] = 2
    # New Code #
    gradient_accumulation_steps = int(args.gradient_accumulation_steps)
    # Initialize accelerator
    accelerator = Accelerator(
        cpu=args.cpu, mixed_precision=args.mixed_precision, gradient_accumulation_steps=gradient_accumulation_steps
    )
    if accelerator.distributed_type == DistributedType.TPU and gradient_accumulation_steps > 1:
        raise NotImplementedError(
            "Gradient accumulation on TPUs is currently not supported. Pass `gradient_accumulation_steps=1`"
        )
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])

    metric = evaluate.load("glue", "mrpc")

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained("bert-base-cased", return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer,
        num_warmup_steps=100,
        num_training_steps=(len(train_dataloader) * num_epochs),
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            # New code #
            # We use the new `accumulate` context manager to perform gradient accumulation
            # We also currently do not support TPUs nor advise it as bugs were found on the XLA side when running our tests.
            with accelerator.accumulate(model):
                output = model(**batch)
                loss = output.loss
                accelerator.backward(loss)
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch["labels"]))
            metric.add_batch(
                predictions=predictions,
                references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI arguments and launch the training function."""
    parser = argparse.ArgumentParser(description="Simple example of training script.")
    parser.add_argument(
        "--mixed_precision",
        type=str,
        default=None,
        choices=["no", "fp16", "bf16", "fp8"],
        help="Whether to use mixed precision. Choose"
        "between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
        "and an Nvidia Ampere GPU.",
    )
    # New Code #
    parser.add_argument(
        "--gradient_accumulation_steps",
        type=int,
        default=1,
        help="The number of minibatches to be ran before gradients are accumulated.",
    )
    parser.add_argument("--cpu", action="store_true", help="If passed, will train on the CPU.")
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
from typing import Optional
from urllib.parse import quote
import huggingface_hub as hfh
from packaging import version
def hf_hub_url(repo_id, path, revision=None):
    """Return the URL of a file inside a Hub *dataset* repository.

    Older huggingface_hub releases (< 0.11.0) did not url-encode the file
    path themselves, so we quote it here for those versions.
    """
    if version.parse(hfh.__version__).release < version.parse("0.11.0").release:
        # old versions of hfh don't url-encode the file path
        path = quote(path)
    return hfh.hf_hub_url(repo_id, path, repo_type="dataset", revision=revision)
from ...configuration_utils import PretrainedConfig
class BertGenerationConfig(PretrainedConfig):
    r"""Configuration class storing the hyper-parameters of a BertGeneration model."""

    model_type = "bert-generation"

    def __init__(
        self,
        vocab_size=50358,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        bos_token_id=2,
        eos_token_id=1,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
"""simple docstring"""
from collections import namedtuple
_a = namedtuple('from_to', 'from_ to')
_a = {
'cubicmeter': from_to(1, 1),
'litre': from_to(0.001, 1_000),
'kilolitre': from_to(1, 1),
'gallon': from_to(0.0_0454, 264.172),
'cubicyard': from_to(0.7_6455, 1.3_0795),
'cubicfoot': from_to(0.028, 35.3147),
'cup': from_to(0.0_0023_6588, 4226.75),
}
def __a ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if from_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'from_type' value: {from_type!r} Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
if to_type not in METRIC_CONVERSION:
raise ValueError(
f"""Invalid 'to_type' value: {to_type!r}. Supported values are:\n"""
+ ", ".join(__lowerCamelCase ) )
return value * METRIC_CONVERSION[from_type].from_ * METRIC_CONVERSION[to_type].to
if __name__ == "__main__":
import doctest
doctest.testmod()
| 61 | 0 |
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
def make_transparency_mask(size, overlap_pixels, remove_borders=None):
    """Build a uint8 alpha mask of `size` (width, height) that ramps from 255
    in the interior down to 0 over `overlap_pixels` at each border.

    Borders listed in `remove_borders` ("l", "r", "t", "b") keep full opacity
    on that side (used for tiles touching the image edge).
    """
    remove_borders = [] if remove_borders is None else remove_borders
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    # Fully opaque core, then a linear ramp to 0 over the overlap region.
    mask = np.ones((size_y, size_x), dtype=np.uint8) * 255
    mask = np.pad(mask, mode="linear_ramp", pad_width=overlap_pixels, end_values=0)

    # Crop away the ramp on sides that should stay fully opaque.
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp(n, smallest, largest):
    """Clamp `n` into the inclusive range [smallest, largest]."""
    return max(smallest, min(n, largest))


def clamp_rect(rect, lower, upper):
    """Clamp an (x0, y0, x1, y1) rectangle into the box [lower, upper]."""
    return (
        clamp(rect[0], lower[0], upper[0]),
        clamp(rect[1], lower[1], upper[1]),
        clamp(rect[2], lower[0], upper[0]),
        clamp(rect[3], lower[1], upper[1]),
    )


def add_overlap_rect(rect, overlap, image_size):
    """Grow `rect` by `overlap` pixels on every side, clamped to the image bounds."""
    rect = list(rect)
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect, [0, 0], [image_size[0], image_size[1]])
    return rect
def squeeze_tile(tile, original_image, original_slice, slice_x):
    """Prepend an `original_slice`-wide strip of the (resized) original image
    to the left of `tile`, giving the upscaler horizontal context."""
    result = Image.new("RGB", (tile.size[0] + original_slice, tile.size[1]))
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]), Image.BICUBIC).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1])
        ),
        (0, 0),
    )
    result.paste(tile, (original_slice, 0))
    return result
def unsqueeze_tile(tile, original_image_slice):
    """Drop the context strip added by `squeeze_tile` from the tile's left
    edge (the strip is 4x wider after upscaling)."""
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect)
    return tile
def next_divisible(n, d):
    """Return the largest multiple of `d` that is <= `n`."""
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline(StableDiffusionUpscalePipeline):
    """Tile-based wrapper around `StableDiffusionUpscalePipeline`.

    The input image is split into overlapping tiles, each tile is upscaled
    4x by the parent pipeline, and the results are blended back together
    with linear-ramp transparency masks so the seams are hidden.
    """

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNetaDConditionModel,
        low_res_scheduler: DDPMScheduler,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        max_noise_level: int = 350,
    ):
        super().__init__(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            low_res_scheduler=low_res_scheduler,
            scheduler=scheduler,
            max_noise_level=max_noise_level,
        )

    def _process_tile(self, original_image_slice, x, y, tile_size, tile_border, image, final_image, **kwargs):
        """Upscale the (x, y) tile of `image` 4x and paste it into `final_image`."""
        torch.manual_seed(0)
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice), x * tile_size),
            min(image.size[1] - (tile_size + original_image_slice), y * tile_size),
            min(image.size[0], (x + 1) * tile_size),
            min(image.size[1], (y + 1) * tile_size),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect, tile_border, image.size)
        tile = image.crop(crop_rect_with_overlap)
        # Horizontal position (within the tile) of the original-image context strip.
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0, translated_slice_x)
        to_input = squeeze_tile(tile, image, original_image_slice, translated_slice_x)
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size), Image.BICUBIC)
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline, self).__call__(image=to_input, **kwargs).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4), Image.BICUBIC)
        upscaled_tile = unsqueeze_tile(upscaled_tile, original_image_slice)
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4), Image.BICUBIC)

        # Tiles on the image border keep full opacity on that side.
        remove_borders = []
        if x == 0:
            remove_borders.append("l")
        elif crop_rect[2] == image.size[0]:
            remove_borders.append("r")
        if y == 0:
            remove_borders.append("t")
        elif crop_rect[3] == image.size[1]:
            remove_borders.append("b")
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]), tile_border * 4, remove_borders=remove_borders
            ),
            mode="L",
        )
        final_image.paste(
            upscaled_tile, (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4), transparency_mask
        )

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        image: Union[PIL.Image.Image, List[PIL.Image.Image]],
        num_inference_steps: int = 75,
        guidance_scale: float = 9.0,
        noise_level: int = 50,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[torch.Generator] = None,
        latents: Optional[torch.FloatTensor] = None,
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
        tile_size: int = 128,
        tile_border: int = 32,
        original_image_slice: int = 32,
    ):
        """Tile `image`, upscale every tile with the parent pipeline and return
        the blended 4x-upscaled PIL image. `callback`, when given, receives a
        dict with "progress" (0..1) and the partially assembled "image"."""
        final_image = Image.new("RGB", (image.size[0] * 4, image.size[1] * 4))
        tcx = math.ceil(image.size[0] / tile_size)
        tcy = math.ceil(image.size[1] / tile_size)
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy):
            for x in range(tcx):
                self._process_tile(
                    original_image_slice,
                    x,
                    y,
                    tile_size,
                    tile_border,
                    image,
                    final_image,
                    prompt=prompt,
                    num_inference_steps=num_inference_steps,
                    guidance_scale=guidance_scale,
                    noise_level=noise_level,
                    negative_prompt=negative_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    eta=eta,
                    generator=generator,
                    latents=latents,
                )
                current_count += 1
                if callback is not None:
                    callback({"progress": current_count / total_tile_count, "image": final_image})
        return final_image
def main():
    """Demo entry point: tile-upscale the diffusers logo 4x on GPU."""
    model_id = "stabilityai/stable-diffusion-x4-upscaler"
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id, revision="fp16", torch_dtype=torch.float16)
    pipe = pipe.to("cuda")
    image = Image.open("../../docs/source/imgs/diffusers_library.jpg")

    def callback(obj):
        # Report progress and keep an intermediate snapshot on disk.
        print(f"progress: {obj['progress']:.4f}")
        obj["image"].save("diffusers_library_progress.jpg")

    final_image = pipe(image=image, prompt="Black font, white background, vector", noise_level=40, callback=callback)
    final_image.save("diffusers_library.jpg")


if __name__ == "__main__":
    main()
| 221 |
from __future__ import annotations
# Candidate moves as (row, col) deltas.
DIRECTIONS = [
    [-1, 0],  # left
    [0, -1],  # down
    [1, 0],  # right
    [0, 1],  # up
]


def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
):
    """A*-style grid search from `init` to `goal`.

    `grid` cells are 0 (free) or 1 (obstacle); every move costs `cost` and
    `heuristic` biases expansion toward the goal.

    Returns:
        (path, action): the list of [row, col] cells from init to goal, and
        the grid of direction indices used to reconstruct it.

    Raises:
        ValueError: if no path to the goal exists.
    """
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    x2 = x + DIRECTIONS[i][0]
                    y2 = y + DIRECTIONS[i][1]
                    if x2 >= 0 and x2 < len(grid) and y2 >= 0 and y2 < len(grid[0]):
                        if closed[x2][y2] == 0 and grid[x2][y2] == 0:
                            g2 = g + cost
                            f2 = g2 + heuristic[x2][y2]
                            cell.append([f2, g2, x2, y2])
                            closed[x2][y2] = 1
                            action[x2][y2] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        x2 = x - DIRECTIONS[action[x][y]][0]
        y2 = y - DIRECTIONS[action[x][y]][1]
        x = x2
        y = y2
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
UpperCamelCase = [
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 1, 0, 0, 0, 0],
[0, 1, 0, 0, 1, 0],
[0, 0, 0, 0, 1, 0],
]
UpperCamelCase = [0, 0]
# all coordinates are given in format [y,x]
UpperCamelCase = [len(grid) - 1, len(grid[0]) - 1]
UpperCamelCase = 1
# the cost map which pushes the path closer to the goal
UpperCamelCase = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
for i in range(len(grid)):
for j in range(len(grid[0])):
UpperCamelCase = abs(i - goal[0]) + abs(j - goal[1])
if grid[i][j] == 1:
# added extra penalty in the heuristic map
UpperCamelCase = 99
UpperCamelCase , UpperCamelCase = search(grid, init, goal, cost, heuristic)
print('ACTION MAP')
for i in range(len(action)):
print(action[i])
for i in range(len(path)):
print(path[i])
| 221 | 1 |
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Canonical checkpoint name -> hosted config URL (transformers convention).
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/encodec_24khz": "https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json",
    "facebook/encodec_48khz": "https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json",
}
class EncodecConfig(PretrainedConfig):
    """Configuration for the EnCodec neural audio codec.

    Stores the encoder/decoder architecture hyper-parameters (filters,
    residual layers, upsampling ratios, LSTM layers, quantizer codebook)
    together with audio-frontend settings (sampling rate, channels,
    optional chunking/overlap). Unknown kwargs are forwarded to
    ``PretrainedConfig``.
    """

    model_type = "encodec"

    def __init__(
        self,
        target_bandwidths=[1.5, 3.0, 6.0, 12.0, 24.0],
        sampling_rate=24_000,
        audio_channels=1,
        normalize=False,
        chunk_length_s=None,
        overlap=None,
        hidden_size=128,
        num_filters=32,
        num_residual_layers=1,
        upsampling_ratios=[8, 5, 4, 2],
        norm_type="weight_norm",
        kernel_size=7,
        last_kernel_size=7,
        residual_kernel_size=3,
        dilation_growth_rate=2,
        use_causal_conv=True,
        pad_mode="reflect",
        compress=2,
        num_lstm_layers=2,
        trim_right_ratio=1.0,
        codebook_size=1024,
        codebook_dim=None,
        use_conv_shortcut=True,
        **kwargs,
    ):
        self.target_bandwidths = target_bandwidths
        self.sampling_rate = sampling_rate
        self.audio_channels = audio_channels
        self.normalize = normalize
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap
        self.hidden_size = hidden_size
        self.num_filters = num_filters
        self.num_residual_layers = num_residual_layers
        self.upsampling_ratios = upsampling_ratios
        self.norm_type = norm_type
        self.kernel_size = kernel_size
        self.last_kernel_size = last_kernel_size
        self.residual_kernel_size = residual_kernel_size
        self.dilation_growth_rate = dilation_growth_rate
        self.use_causal_conv = use_causal_conv
        self.pad_mode = pad_mode
        self.compress = compress
        self.num_lstm_layers = num_lstm_layers
        self.trim_right_ratio = trim_right_ratio
        self.codebook_size = codebook_size
        # Codebook dimension defaults to the model hidden size.
        self.codebook_dim = codebook_dim if codebook_dim is not None else hidden_size
        self.use_conv_shortcut = use_conv_shortcut

        if self.norm_type not in ["weight_norm", "time_group_norm"]:
            raise ValueError(
                f'self.norm_type must be one of `"weight_norm"`, `"time_group_norm"`), got {self.norm_type}'
            )

        super().__init__(**kwargs)

    @property
    def chunk_length(self):
        """Chunk length in samples, or None when chunking is disabled."""
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        """Stride between chunks in samples (at least 1), or None when unset."""
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    @property
    def frame_rate(self) -> int:
        """Number of codec frames per second of audio."""
        hop_length = np.prod(self.upsampling_ratios)
        return math.ceil(self.sampling_rate / hop_length)

    @property
    def num_quantizers(self) -> int:
        """Quantizer count needed to reach the largest target bandwidth."""
        return int(1000 * self.target_bandwidths[-1] // (self.frame_rate * 10))
| 245 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def write_model_card(model_card_dir, src_lang, tgt_lang):
    """Generate and write the README.md model card for one wmt19 language pair.

    Args:
        model_card_dir: directory the README.md is written into (created if missing).
        src_lang: source language code, e.g. "en".
        tgt_lang: target language code, e.g. "ru".
    """
    texts = {
        "en": "Machine learning is great, isn't it?",
        "ru": "Машинное обучение - это здорово, не так ли?",
        "de": "Maschinelles Lernen ist großartig, oder?",
    }

    # BLUE scores as follows:
    # "pair": [fairseq, transformers]
    scores = {
        "ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
        "en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
        "en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
        "de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
    }
    pair = f"{src_lang}-{tgt_lang}"

    readme = f"\n---\nlanguage: \n- {src_lang}\n- {tgt_lang}\nthumbnail:\ntags:\n- translation\n- wmt19\n- facebook\nlicense: apache-2.0\ndatasets:\n- wmt19\nmetrics:\n- bleu\n---\n\n# FSMT\n\n## Model description\n\nThis is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.\n\nFor more details, please see, [Facebook FAIR's WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).\n\nThe abbreviation FSMT stands for FairSeqMachineTranslation\n\nAll four models are available:\n\n* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)\n* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)\n* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)\n* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)\n\n## Intended uses & limitations\n\n#### How to use\n\n```python\nfrom transformers import FSMTForConditionalGeneration, FSMTTokenizer\nmname = \"facebook/wmt19-{src_lang}-{tgt_lang}\"\ntokenizer = FSMTTokenizer.from_pretrained(mname)\nmodel = FSMTForConditionalGeneration.from_pretrained(mname)\n\ninput = \"{texts[src_lang]}\"\ninput_ids = tokenizer.encode(input, return_tensors=\"pt\")\noutputs = model.generate(input_ids)\ndecoded = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(decoded) # {texts[tgt_lang]}\n\n```\n\n#### Limitations and bias\n\n- The original (and this ported model) doesn't seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)\n\n## Training data\n\nPretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).\n\n## Eval results\n\npair | fairseq | transformers\n-------|---------|----------\n{pair} | {scores[pair][0]} | {scores[pair][1]}\n\nThe score is slightly below the score reported by `fairseq`, since `transformers`` currently doesn't support:\n- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).\n- re-ranking\n\nThe score was calculated using this code:\n\n```bash\ngit clone https://github.com/huggingface/transformers\ncd transformers\nexport PAIR={pair}\nexport DATA_DIR=data/$PAIR\nexport SAVE_DIR=data/$PAIR\nexport BS=8\nexport NUM_BEAMS=15\nmkdir -p $DATA_DIR\nsacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source\nsacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target\necho $PAIR\nPYTHONPATH=\"src:examples/seq2seq\" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS\n```\nnote: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.\n\n## Data Sources\n\n- [training, etc.](http://www.statmt.org/wmt19/)\n- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)\n\n\n### BibTeX entry and citation info\n\n```bibtex\n@inproceedings{{...,\n year={{2020}},\n title={{Facebook FAIR's WMT19 News Translation Task Submission}},\n author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},\n booktitle={{Proc. of WMT}},\n}}\n```\n\n\n## TODO\n\n- port model ensemble (fairseq uses 4 model checkpoints)\n\n"
    os.makedirs(model_card_dir, exist_ok=True)
    path = os.path.join(model_card_dir, "README.md")
    print(f"Generating {path}")
    with open(path, "w", encoding="utf-8") as f:
        f.write(readme)
# make sure we are under the root of the project
repo_dir = Path(__file__).resolve().parent.parent.parent
model_cards_dir = repo_dir / "model_cards"

for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    # model_name is "<prefix>-<src>-<tgt>", e.g. "wmt19-en-ru".
    prefix, src_lang, tgt_lang = model_name.split("-")
    model_card_dir = model_cards_dir / "facebook" / model_name
    write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
| 245 | 1 |
'''simple docstring'''
import inspect
import unittest
from huggingface_hub import hf_hub_download
from transformers import ConvNextConfig, UperNetConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import UperNetForSemanticSegmentation
from transformers.models.upernet.modeling_upernet import UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UperNetModelTester:
    """Builds tiny UperNet configs and dummy inputs for the unit tests below."""

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        type_sequence_label_size=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        num_labels=3,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.num_labels = num_labels
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_backbone_config(self):
        return ConvNextConfig(
            num_channels=self.num_channels,
            num_stages=self.num_stages,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            is_training=self.is_training,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            out_features=self.out_features,
        )

    def get_config(self):
        return UperNetConfig(
            backbone_config=self.get_backbone_config(),
            hidden_size=512,
            pool_scales=[1, 2, 3, 6],
            use_auxiliary_head=True,
            auxiliary_loss_weight=0.4,
            auxiliary_in_channels=40,
            auxiliary_channels=256,
            auxiliary_num_convs=1,
            auxiliary_concat_input=False,
            loss_ignore_index=255,
            num_labels=self.num_labels,
        )

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        model = UperNetForSemanticSegmentation(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class UperNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common-model tests for UperNet; several base tests are skipped because
    UperNet has no text inputs, no embeddings and no standalone base model."""

    all_model_classes = (UperNetForSemanticSegmentation,) if is_torch_available() else ()
    pipeline_model_mapping = {"image-segmentation": UperNetForSemanticSegmentation} if is_torch_available() else {}

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_torchscript = False
    has_attentions = False

    def setUp(self):
        self.model_tester = UperNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=UperNetConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)

    @unittest.skip(reason="UperNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="UperNet does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_from_base(self):
        pass

    @unittest.skip(reason="UperNet does not have a base model")
    def test_save_load_fast_init_to_base(self):
        pass

    @require_torch_multi_gpu
    @unittest.skip(reason="UperNet has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        configs_no_init.backbone_config = _config_zero_init(configs_no_init.backbone_config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip(reason="UperNet does not have tied weights")
    def test_tied_model_weights_key_ignore(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in UPERNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = UperNetForSemanticSegmentation.from_pretrained(model_name)
            self.assertIsNotNone(model)
def prepare_img():
    """Download and return the ADE20k validation image used by the slow tests."""
    filepath = hf_hub_download(
        repo_id="hf-internal-testing/fixtures_ade20k", repo_type="dataset", filename="ADE_val_00000001.jpg"
    )
    image = Image.open(filepath).convert("RGB")
    return image
@require_torch
@require_vision
@slow
class UperNetModelIntegrationTest(unittest.TestCase):
    """Slow integration tests against real openmmlab upernet checkpoints."""

    def test_inference_swin_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-swin-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-swin-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-7.5958, -7.5958, -7.4302], [-7.5958, -7.5958, -7.4302], [-7.4797, -7.4797, -7.3068]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_convnext_backbone(self):
        processor = AutoImageProcessor.from_pretrained("openmmlab/upernet-convnext-tiny")
        model = UperNetForSemanticSegmentation.from_pretrained("openmmlab/upernet-convnext-tiny").to(torch_device)

        image = prepare_img()
        inputs = processor(images=image, return_tensors="pt").to(torch_device)

        with torch.no_grad():
            outputs = model(**inputs)

        expected_shape = torch.Size((1, model.config.num_labels, 512, 512))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-8.8110, -8.8110, -8.6521], [-8.8110, -8.8110, -8.6521], [-8.7746, -8.7746, -8.6130]]
        ).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, 0, :3, :3], expected_slice, atol=1e-4))
| 332 |
'''simple docstring'''
from __future__ import annotations
def bucket_sort(my_list: list) -> list:
    """Sort a list of numbers with bucket sort.

    Each value lands in the bucket indexed by its integer offset from the
    minimum; buckets are then sorted individually and concatenated.

    >>> bucket_sort([4, 5, 3, 2, 1])
    [1, 2, 3, 4, 5]
    >>> bucket_sort([])
    []
    """
    if len(my_list) == 0:
        return []
    min_value, max_value = min(my_list), max(my_list)
    bucket_count = int(max_value - min_value) + 1
    buckets: list[list] = [[] for _ in range(bucket_count)]

    for value in my_list:
        buckets[int(value - min_value)].append(value)

    return [v for bucket in buckets for v in sorted(bucket)]


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    assert bucket_sort([4, 5, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bucket_sort([0, 1, -10, 15, 2, -2]) == [-10, -2, 0, 1, 2, 15]
| 332 | 1 |
from __future__ import annotations
def solve_maze(maze: list[list[int]]) -> bool:
    """Solve the rat-in-a-maze problem on a square grid.

    Prints the solution grid when a path from (0, 0) to the bottom-right
    cell exists, otherwise prints a failure message.

    Returns:
        True if a path exists, False otherwise.
    """
    size = len(maze)
    # We need to create solution object to save path.
    solutions = [[0 for _ in range(size)] for _ in range(size)]
    solved = run_maze(maze, 0, 0, solutions)
    if solved:
        print("\n".join(str(row) for row in solutions))
    else:
        print("No solution exists!")
    return solved
def run_maze(maze: list[list[int]], i: int, j: int, solutions: list[list[int]]) -> bool:
    """Depth-first helper for `solve_maze`.

    Tries to extend the current path at cell (i, j); cells on the found path
    are marked 1 in `solutions` (and un-marked again on backtracking).
    """
    size = len(maze)
    # Final check point.
    if i == j == (size - 1):
        solutions[i][j] = 1
        return True

    lower_flag = (not i < 0) and (not j < 0)  # Check lower bounds
    upper_flag = (i < size) and (j < size)  # Check upper bounds

    if lower_flag and upper_flag:
        # check for already visited and block points.
        block_flag = (not solutions[i][j]) and (not maze[i][j])
        if block_flag:
            # check visited
            solutions[i][j] = 1

            # check for directions
            if (
                run_maze(maze, i + 1, j, solutions)
                or run_maze(maze, i, j + 1, solutions)
                or run_maze(maze, i - 1, j, solutions)
                or run_maze(maze, i, j - 1, solutions)
            ):
                return True

            solutions[i][j] = 0
            return False
    return False


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 18 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> exported symbols.
# The obfuscated original overwrote a single variable (`_A`) with each list
# and then referenced an undefined `_import_structure`; restored to the
# standard transformers lazy-module pattern.
_import_structure = {}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb"] = ["NllbTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_nllb_fast"] = ["NllbTokenizerFast"]

if TYPE_CHECKING:
    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb import NllbTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_nllb_fast import NllbTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 231 | 0 |
def is_pentagonal(UpperCAmelCase: int) -> bool:
    """Return True if *UpperCAmelCase* is a pentagonal number.

    A number P is pentagonal iff (1 + sqrt(1 + 24*P)) / 6 is an integer.
    Renamed from the obfuscated `a__` (which was shadowed by the next
    definition) to match its call sites.
    """
    root = (1 + 24 * UpperCAmelCase) ** 0.5
    return ((1 + root) / 6) % 1 == 0
def solution(UpperCAmelCase: int = 5_000) -> int:
    """Project Euler 44: find pentagonal numbers Pj, Pk whose sum and
    difference are both pentagonal, and return the minimal difference found.

    Args:
        UpperCAmelCase: number of pentagonal numbers to consider.

    Returns:
        The difference D = |Pk - Pj|, or -1 if no pair exists in range.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, UpperCAmelCase)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    # NOTE(review): `solution` is not defined under that name above (the
    # function is named `a__`) — as written this call raises NameError.
    print(f"""{solution() = }""")
| 99 |
import logging
import sys
from dataclasses import dataclass, field
from typing import Any, Dict, List, Optional, Union
import librosa
import torch
from datasets import DatasetDict, load_dataset
from packaging import version
from torch import nn
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForPreTraining,
is_apex_available,
trainer_utils,
)
from transformers.models.wavaveca.modeling_wavaveca import _compute_mask_indices
if is_apex_available():
    from apex import amp

# PyTorch >= 1.6 ships native automatic mixed precision (torch.cuda.amp).
if version.parse(version.parse(torch.__version__).base_version) >= version.parse("1.6"):
    _is_native_amp_available = True
    from torch.cuda.amp import autocast

# Module logger — the obfuscated original bound both the flag above and this
# logger to the same name, leaving `logger` undefined for the rest of the file.
logger = logging.getLogger(__name__)
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config we are going to pretrain.

    Field names restored from their use sites in `main` (the obfuscated
    original named every field `UpperCamelCase`, so only the last survived).
    Defaults reconstructed from the upstream wav2vec2 pretraining script.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    freeze_feature_extractor: Optional[bool] = field(
        default=True, metadata={"help": "Whether to freeze the feature extractor layers of the model."}
    )
    verbose_logging: Optional[bool] = field(
        default=False,
        metadata={"help": "Whether to log verbose messages or not."},
    )
    max_gumbel_temperature: Optional[float] = field(
        default=2.0, metadata={"help": "Maximum temperature for gumbel softmax."}
    )
    min_gumbel_temperature: Optional[float] = field(
        default=0.5, metadata={"help": "Minimum temperature for gumbel softmax."}
    )
    gumbel_temperature_decay: Optional[float] = field(
        default=0.999995, metadata={"help": "Decay of gumbel temperature during training."}
    )
def configure_logger(model_args: ModelArguments, training_args: TrainingArguments) -> None:
    """Configure the module logger's format and verbosity.

    DEBUG when --verbose_logging is set, INFO on the main distributed
    process, WARNING otherwise. Renamed from the obfuscated `a__` (whose
    duplicate parameter names were a SyntaxError) to match its call in main.
    """
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logging_level = logging.WARNING
    if model_args.verbose_logging:
        logging_level = logging.DEBUG
    elif trainer_utils.is_main_process(training_args.local_rank):
        logging_level = logging.INFO
    logger.setLevel(logging_level)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input for pretraining.

    Field names restored from their use sites in `main` (the obfuscated
    original named every field `UpperCamelCase`).
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_split_name: Optional[str] = field(
        default="train",
        metadata={
            "help": "The name of the training data set split to use (via the datasets library). Defaults to 'train'"
        },
    )
    validation_split_name: Optional[str] = field(
        default="validation",
        metadata={
            "help": (
                "The name of the validation data set split to use (via the datasets library). Defaults to 'validation'"
            )
        },
    )
    speech_file_column: Optional[str] = field(
        default="file",
        metadata={"help": "Column in the dataset that contains speech file path. Defaults to 'file'"},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    validation_split_percentage: Optional[int] = field(
        default=1,
        metadata={
            "help": "The percentage of the train set used as validation set in case there's no validation split"
        },
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_duration_in_seconds: Optional[float] = field(
        default=20.0, metadata={"help": "Filter audio files that are longer than `max_duration_in_seconds` seconds"}
    )
@dataclass
class DataCollatorForWavaVecaPretraining:
    """Pad raw speech inputs and sample the masked time indices for
    wav2vec2 pretraining.

    The obfuscated original replaced every field annotation with `42` and
    dropped several assignment targets; this is reconstructed from the
    upstream wav2vec2 pretraining script — TODO confirm against it.
    """

    model: WavaVecaForPreTraining
    feature_extractor: WavaVecaFeatureExtractor
    padding: Union[bool, str] = "longest"
    pad_to_multiple_of: Optional[int] = None
    max_length: Optional[int] = None

    def __call__(self, features: List[Dict[str, Union[List[int], torch.Tensor]]]) -> Dict[str, torch.Tensor]:
        # Reformat list to dict and set to pytorch format.
        batch = self.feature_extractor.pad(
            features,
            max_length=self.max_length,
            padding=self.padding,
            pad_to_multiple_of=self.pad_to_multiple_of,
            return_tensors="pt",
        )
        # Sequence length after the convolutional feature extractor.
        mask_indices_seq_length = self.model._get_feat_extract_output_lengths(batch["input_values"].shape[-1])
        batch_size = batch["input_values"].shape[0]

        # Make sure that no loss is computed on padded inputs.
        if batch["attention_mask"] is not None:
            # Compute real output lengths according to the convolution formula.
            output_lengths = self.model._get_feat_extract_output_lengths(batch["attention_mask"].sum(-1)).to(
                torch.long
            )
            attention_mask = torch.zeros(
                (batch_size, mask_indices_seq_length), dtype=torch.long, device=batch["input_values"].device
            )
            # These two operations make sure that all values
            # before the output lengths indices are attended to.
            attention_mask[
                (torch.arange(attention_mask.shape[0], device=batch["input_values"].device), output_lengths - 1)
            ] = 1
            attention_mask = attention_mask.flip([-1]).cumsum(-1).flip([-1]).bool()

        # Sample randomly masked indices.
        # NOTE(review): when batch["attention_mask"] is None, `attention_mask`
        # is unbound here — the original had the same shape; confirm intent.
        batch["mask_time_indices"] = _compute_mask_indices(
            (batch_size, mask_indices_seq_length),
            self.model.config.mask_time_prob,
            self.model.config.mask_time_length,
            attention_mask=attention_mask,
            min_masks=2,
        )
        return batch
class WavaVecaPreTrainer(Trainer):
    """Trainer subclass that decays the model's gumbel-softmax temperature
    after every update step.

    Renamed from the obfuscated `__UpperCAmelCase` to match its use in main;
    the step method must be named `training_step` to override Trainer's.
    """

    def __init__(self, *args, max_gumbel_temp=1, min_gumbel_temp=0, gumbel_temp_decay=1.0, **kwargs):
        super().__init__(*args, **kwargs)
        self.num_update_step = 0
        self.max_gumbel_temp = max_gumbel_temp
        self.min_gumbel_temp = min_gumbel_temp
        self.gumbel_temp_decay = gumbel_temp_decay

    def training_step(self, model: nn.Module, inputs: Dict[str, Union[torch.Tensor, Any]]) -> torch.Tensor:
        """Run one forward/backward pass and decay the gumbel temperature."""
        model.train()
        inputs = self._prepare_inputs(inputs)

        if self.use_amp:
            with autocast():
                loss = self.compute_loss(model, inputs)
        else:
            loss = self.compute_loss(model, inputs)

        if self.args.n_gpu > 1 or self.deepspeed:
            if model.module.config.ctc_loss_reduction == "mean":
                loss = loss.mean()
            elif model.module.config.ctc_loss_reduction == "sum":
                loss = loss.sum() / (inputs["mask_time_indices"]).sum()
            else:
                raise ValueError(f"{model.config.ctc_loss_reduction} is not valid. Choose one of ['mean', 'sum']")

        if self.args.gradient_accumulation_steps > 1:
            loss = loss / self.args.gradient_accumulation_steps

        if self.use_amp:
            self.scaler.scale(loss).backward()
        elif self.use_apex:
            with amp.scale_loss(loss, self.optimizer) as scaled_loss:
                scaled_loss.backward()
        elif self.deepspeed:
            self.deepspeed.backward(loss)
        else:
            loss.backward()

        self.num_update_step += 1
        # Make sure the gumbel softmax temperature is decayed (but never
        # below the configured minimum).
        if self.args.n_gpu > 1 or self.deepspeed:
            model.module.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        else:
            model.set_gumbel_temperature(
                max(self.max_gumbel_temp * self.gumbel_temp_decay**self.num_update_step, self.min_gumbel_temp)
            )
        return loss.detach()
def main():
    """Entry point: parse arguments, load and preprocess the dataset, and
    run wav2vec2 pretraining.

    Renamed from the obfuscated `a__` to match the `main()` call in the
    `__main__` guard; local names restored from the upstream script.
    """
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
    configure_logger(model_args, training_args)

    # Downloading and loading a dataset from the hub.
    datasets = load_dataset(data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir)

    if "validation" not in datasets.keys():
        # Make sure only "validation" and "train" keys remain, carving the
        # validation split out of the head of the training split.
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[:{data_args.validation_split_percentage}%]",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}[{data_args.validation_split_percentage}%:]",
            cache_dir=model_args.cache_dir,
        )
    else:
        # Make sure only "validation" and "train" keys remain.
        datasets = DatasetDict()
        datasets["validation"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split="validation",
            cache_dir=model_args.cache_dir,
        )
        datasets["train"] = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            split=f"{data_args.train_split_name}",
            cache_dir=model_args.cache_dir,
        )

    # Only normalized-inputs-training is supported.
    feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        model_args.model_name_or_path, cache_dir=model_args.cache_dir, do_normalize=True
    )

    def prepare_dataset(batch):
        # Load audio and check that all files have the correct sampling rate.
        batch["speech"], _ = librosa.load(batch[data_args.speech_file_column], sr=feature_extractor.sampling_rate)
        return batch

    # Load audio files into numpy arrays.
    vectorized_datasets = datasets.map(
        prepare_dataset, num_proc=data_args.preprocessing_num_workers, remove_columns=datasets["train"].column_names
    )

    # Filter audio files that are too long.
    vectorized_datasets = vectorized_datasets.filter(
        lambda data: len(data["speech"]) < int(data_args.max_duration_in_seconds * feature_extractor.sampling_rate)
    )

    def normalize(batch):
        return feature_extractor(batch["speech"], sampling_rate=feature_extractor.sampling_rate)

    # Normalize and transform to `BatchFeatures`.
    vectorized_datasets = vectorized_datasets.map(
        normalize,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
        load_from_cache_file=not data_args.overwrite_cache,
        remove_columns=vectorized_datasets["train"].column_names,
    )

    # Pretraining is only supported for the "newer" stable layer norm
    # architecture; apply_spec_augment has to be True, mask_feature_prob 0.0.
    config = WavaVecaConfig.from_pretrained(
        model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        gradient_checkpointing=training_args.gradient_checkpointing,
    )

    if not config.do_stable_layer_norm or config.feat_extract_norm != "layer":
        raise ValueError(
            "PreTraining is only supported for ``config.do_stable_layer_norm=True`` and"
            " ``config.feat_extract_norm='layer'"
        )

    model = WavaVecaForPreTraining(config)

    data_collator = DataCollatorForWavaVecaPretraining(model=model, feature_extractor=feature_extractor)

    trainer = WavaVecaPreTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        train_dataset=vectorized_datasets["train"],
        eval_dataset=vectorized_datasets["validation"],
        tokenizer=feature_extractor,
        max_gumbel_temp=model_args.max_gumbel_temperature,
        min_gumbel_temp=model_args.min_gumbel_temperature,
        gumbel_temp_decay=model_args.gumbel_temperature_decay,
    )
    trainer.train()
| 99 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
# Lazy-import structure: maps submodule name -> exported symbols. The
# obfuscated original overwrote the dict with a bare list and referenced an
# undefined `_import_structure`; restored to the standard lazy-module pattern.
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 100 |
"""simple docstring"""
from typing import List, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Map of pretrained checkpoint names to their config URLs. The obfuscated
# original bound both the logger and this map to `__magic_name__`.
INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "huggingface/informer-tourism-monthly": (
        "https://huggingface.co/huggingface/informer-tourism-monthly/resolve/main/config.json"
    ),
    # See all Informer models at https://huggingface.co/models?filter=informer
}
class InformerConfig(PretrainedConfig):
    """Configuration class for the Informer time-series model.

    Reconstructed from the obfuscated original: the base class (`__a`) was
    undefined, every keyword parameter shared one name (a SyntaxError), all
    `self.*` assignment targets were lost, and the `_number_of_features`
    property (referenced in `__init__`) was misnamed.
    """

    model_type = "informer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }

    def __init__(
        self,
        prediction_length=None,
        context_length=None,
        distribution_output="student_t",
        loss="nll",
        input_size=1,
        lags_sequence=None,
        scaling="mean",
        num_dynamic_real_features=0,
        num_static_categorical_features=0,
        num_static_real_features=0,
        num_time_features=0,
        cardinality=None,
        embedding_dimension=None,
        d_model=64,
        encoder_ffn_dim=32,
        decoder_ffn_dim=32,
        encoder_attention_heads=2,
        decoder_attention_heads=2,
        encoder_layers=2,
        decoder_layers=2,
        is_encoder_decoder=True,
        activation_function="gelu",
        dropout=0.05,
        encoder_layerdrop=0.1,
        decoder_layerdrop=0.1,
        attention_dropout=0.1,
        activation_dropout=0.1,
        num_parallel_samples=100,
        init_std=0.02,
        use_cache=True,
        attention_type="prob",
        sampling_factor=5,
        distil=True,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length or prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence if lags_sequence is not None else [1, 2, 3, 4, 5, 6, 7]
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features

        # set cardinality
        if cardinality and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]

        # set embedding_dimension
        if embedding_dimension and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers

        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop

        self.activation_function = activation_function
        self.init_std = init_std

        self.use_cache = use_cache

        # Informer-specific (ProbSparse attention / distillation)
        self.attention_type = attention_type
        self.sampling_factor = sampling_factor
        self.distil = distil

        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
| 100 | 1 |
"""simple docstring"""
from torch import nn
def snake_case(act_fn):
    """Return the torch.nn activation module for the given name.

    The obfuscated original named the parameter ``A__`` while the body read
    ``act_fn`` (a NameError on every call); the parameter name is restored.

    Raises:
        ValueError: if *act_fn* is not one of swish/silu/mish/gelu.
    """
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f"Unsupported activation function: {act_fn}")
| 356 |
"""simple docstring"""
from __future__ import annotations
import math
import random
from collections.abc import Collection
from typing import overload
class Vector:
    """A simple mathematical vector over floats.

    Reconstructed from the obfuscated original, whose ``__init__`` never set
    ``self.__components`` and whose five named methods all shared one name
    (so only the last survived). Restored names: copy, component,
    change_component, euclidean_length, angle.
    """

    def __init__(self, components: Collection[float] | None = None) -> None:
        # Default to the empty vector rather than a shared mutable default.
        if components is None:
            components = []
        self.__components = list(components)

    def __len__(self) -> int:
        """Return the dimension of the vector."""
        return len(self.__components)

    def __str__(self) -> str:
        """Render as "(c1,c2,...)"."""
        return "(" + ",".join(map(str, self.__components)) + ")"

    def __add__(self, other: Vector) -> Vector:
        """Component-wise addition; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] + other.component(i) for i in range(size)]
            return Vector(result)
        else:
            raise Exception("must have the same size")

    def __sub__(self, other: Vector) -> Vector:
        """Component-wise subtraction; both vectors must have the same size."""
        size = len(self)
        if size == len(other):
            result = [self.__components[i] - other.component(i) for i in range(size)]
            return Vector(result)
        else:  # error case
            raise Exception("must have the same size")

    @overload
    def __mul__(self, other: float) -> Vector:
        ...

    @overload
    def __mul__(self, other: Vector) -> float:
        ...

    def __mul__(self, other: float | Vector) -> float | Vector:
        """Scalar multiplication, or the dot product with another vector."""
        if isinstance(other, (float, int)):
            scaled = [c * other for c in self.__components]
            return Vector(scaled)
        elif isinstance(other, Vector) and len(self) == len(other):
            size = len(self)
            prods = [self.__components[i] * other.component(i) for i in range(size)]
            return sum(prods)
        else:  # error case
            raise Exception("invalid operand!")

    def copy(self) -> Vector:
        """Return a copy of this vector."""
        return Vector(self.__components)

    def component(self, i: int) -> float:
        """Return the i-th component (negative indices allowed)."""
        if isinstance(i, int) and -len(self.__components) <= i < len(self.__components):
            return self.__components[i]
        else:
            raise Exception("index out of range")

    def change_component(self, pos: int, value: float) -> None:
        """Set the component at *pos* to *value*."""
        assert -len(self.__components) <= pos < len(self.__components)
        self.__components[pos] = value

    def euclidean_length(self) -> float:
        """Return the Euclidean (L2) norm of the vector."""
        if len(self.__components) == 0:
            raise Exception("Vector is empty")
        squares = [c**2 for c in self.__components]
        return math.sqrt(sum(squares))

    def angle(self, other: Vector, deg: bool = False) -> float:
        """Return the angle to *other*, in radians (or degrees if *deg*)."""
        num = self * other
        den = self.euclidean_length() * other.euclidean_length()
        if deg:
            return math.degrees(math.acos(num / den))
        else:
            return math.acos(num / den)
def zero_vector(dimension: int) -> Vector:
    """Return the zero vector of the given dimension.

    Renamed from the obfuscated `snake_case` (shadowed by later defs); the
    original assert compared the argument against itself.
    """
    assert isinstance(dimension, int)
    return Vector([0] * dimension)
def unit_basis_vector(dimension: int, pos: int) -> Vector:
    """Return the unit basis vector with a 1 at index *pos*, 0 elsewhere.

    Parameter names restored — the obfuscated signature had duplicate
    parameter names (a SyntaxError).
    """
    assert isinstance(dimension, int) and isinstance(pos, int)
    ans = [0] * dimension
    ans[pos] = 1
    return Vector(ans)
def axpy(scalar: float, x: Vector, y: Vector) -> Vector:
    """Return scalar * x + y (the BLAS "axpy" operation).

    Parameter names restored — the obfuscated signature had duplicate
    parameter names (a SyntaxError).
    """
    assert isinstance(x, Vector) and isinstance(y, Vector) and (isinstance(scalar, (int, float)))
    return x * scalar + y
def random_vector(n: int, a: int, b: int) -> Vector:
    """Return a random n-dimensional vector with integer components in [a, b].

    NOTE(review): the obfuscated original seeded with one of its (duplicate)
    parameters; seeding with None (system entropy) matches the upstream
    library — confirm if reproducibility was intended.
    """
    random.seed(None)
    components = [random.randint(a, b) for _ in range(n)]
    return Vector(components)
class Matrix:
    """A simple w x h matrix over floats.

    Reconstructed from the obfuscated original, whose ``__init__`` never set
    any attributes and whose named methods all shared one name. Restored
    names: height, width, component, change_component, minor, cofactor,
    determinant. (It also shadowed the Vector class above.)
    """

    def __init__(self, matrix: list[list[float]], w: int, h: int) -> None:
        self.__matrix = matrix
        self.__width = w
        self.__height = h

    def __str__(self) -> str:
        """Render rows as "|a,b,...|" lines."""
        ans = ""
        for i in range(self.__height):
            ans += "|"
            for j in range(self.__width):
                if j < self.__width - 1:
                    ans += str(self.__matrix[i][j]) + ","
                else:
                    ans += str(self.__matrix[i][j]) + "|\n"
        return ans

    def __add__(self, other: Matrix) -> Matrix:
        """Component-wise addition; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] + other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrix must have the same dimension!")

    def __sub__(self, other: Matrix) -> Matrix:
        """Component-wise subtraction; dimensions must match."""
        if self.__width == other.width() and self.__height == other.height():
            matrix = []
            for i in range(self.__height):
                row = [
                    self.__matrix[i][j] - other.component(i, j)
                    for j in range(self.__width)
                ]
                matrix.append(row)
            return Matrix(matrix, self.__width, self.__height)
        else:
            raise Exception("matrices must have the same dimension!")

    @overload
    def __mul__(self, other: float) -> Matrix:
        ...

    @overload
    def __mul__(self, other: Vector) -> Vector:
        ...

    def __mul__(self, other: float | Vector) -> Vector | Matrix:
        """Matrix-vector product, or scalar multiplication."""
        if isinstance(other, Vector):  # matrix-vector
            if len(other) == self.__width:
                ans = zero_vector(self.__height)
                for i in range(self.__height):
                    prods = [
                        self.__matrix[i][j] * other.component(j)
                        for j in range(self.__width)
                    ]
                    ans.change_component(i, sum(prods))
                return ans
            else:
                raise Exception(
                    "vector must have the same size as the "
                    "number of columns of the matrix!"
                )
        elif isinstance(other, (int, float)):  # matrix-scalar
            matrix = [
                [self.__matrix[i][j] * other for j in range(self.__width)]
                for i in range(self.__height)
            ]
            return Matrix(matrix, self.__width, self.__height)
        # Unsupported operand — the original falls through to None.
        return None

    def height(self) -> int:
        """Return the number of rows."""
        return self.__height

    def width(self) -> int:
        """Return the number of columns."""
        return self.__width

    def component(self, x: int, y: int) -> float:
        """Return the component at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return self.__matrix[x][y]
        else:
            raise Exception("change_component: indices out of bounds")

    def change_component(self, x: int, y: int, value: float) -> None:
        """Set the component at row x, column y."""
        if 0 <= x < self.__height and 0 <= y < self.__width:
            self.__matrix[x][y] = value
        else:
            raise Exception("change_component: indices out of bounds")

    def minor(self, x: int, y: int) -> float:
        """Return the minor: determinant with row x and column y removed."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        minor = self.__matrix[:x] + self.__matrix[x + 1 :]
        for i in range(len(minor)):
            minor[i] = minor[i][:y] + minor[i][y + 1 :]
        return Matrix(minor, self.__width - 1, self.__height - 1).determinant()

    def cofactor(self, x: int, y: int) -> float:
        """Return the signed minor (cofactor) at row x, column y."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if 0 <= x < self.__height and 0 <= y < self.__width:
            return (-1) ** (x + y) * self.minor(x, y)
        else:
            raise Exception("Indices out of bounds")

    def determinant(self) -> float:
        """Return the determinant via Laplace expansion along the first row."""
        if self.__height != self.__width:
            raise Exception("Matrix is not square")
        if self.__height < 1:
            raise Exception("Matrix has no element")
        elif self.__height == 1:
            return self.__matrix[0][0]
        elif self.__height == 2:
            return (
                self.__matrix[0][0] * self.__matrix[1][1]
                - self.__matrix[0][1] * self.__matrix[1][0]
            )
        else:
            cofactor_prods = [
                self.__matrix[0][y] * self.cofactor(0, y) for y in range(self.__width)
            ]
            return sum(cofactor_prods)
def square_zero_matrix(n: int) -> Matrix:
    """Return the n x n zero matrix.

    Renamed from the obfuscated `snake_case`; the original body referenced
    an undefined name for the row width.
    """
    ans: list[list[float]] = [[0] * n for _ in range(n)]
    return Matrix(ans, n, n)
def random_matrix(width: int, height: int, a: int, b: int) -> Matrix:
    """Return a random width x height matrix with integer entries in [a, b].

    Parameter names restored — the obfuscated signature had duplicate
    parameter names (a SyntaxError). NOTE(review): seeded with None (system
    entropy) per the upstream library — confirm if a fixed seed was intended.
    """
    random.seed(None)
    matrix: list[list[float]] = [
        [random.randint(a, b) for _ in range(width)] for _ in range(height)
    ]
    return Matrix(matrix, width, height)
| 253 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

# Pretrained config archive map (empty for Megatron-BERT). The obfuscated
# original bound both the logger and this map to `__lowerCamelCase`.
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    # See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class MegatronBertConfig(PretrainedConfig):
    """Configuration class for Megatron-BERT models.

    Reconstructed from the obfuscated original: the base class (`A_`) was
    undefined and every `self.*` assignment target had been collapsed into a
    single local name.
    """

    model_type = "megatron-bert"

    def __init__(
        self,
        vocab_size=29056,
        hidden_size=1024,
        num_hidden_layers=24,
        num_attention_heads=16,
        intermediate_size=4096,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 59 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure: maps submodule name -> exported symbols. The
# obfuscated original overwrote a single variable (`_UpperCAmelCase`) with
# each list and then referenced an undefined `_import_structure`; restored
# to the standard transformers lazy-module pattern.
_import_structure = {
    "configuration_distilbert": [
        "DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "DistilBertConfig",
        "DistilBertOnnxConfig",
    ],
    "tokenization_distilbert": ["DistilBertTokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_distilbert_fast"] = ["DistilBertTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_distilbert"] = [
        "DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "DistilBertForMaskedLM",
        "DistilBertForMultipleChoice",
        "DistilBertForQuestionAnswering",
        "DistilBertForSequenceClassification",
        "DistilBertForTokenClassification",
        "DistilBertModel",
        "DistilBertPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_distilbert"] = [
        "TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFDistilBertForMaskedLM",
        "TFDistilBertForMultipleChoice",
        "TFDistilBertForQuestionAnswering",
        "TFDistilBertForSequenceClassification",
        "TFDistilBertForTokenClassification",
        "TFDistilBertMainLayer",
        "TFDistilBertModel",
        "TFDistilBertPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_distilbert"] = [
        "FlaxDistilBertForMaskedLM",
        "FlaxDistilBertForMultipleChoice",
        "FlaxDistilBertForQuestionAnswering",
        "FlaxDistilBertForSequenceClassification",
        "FlaxDistilBertForTokenClassification",
        "FlaxDistilBertModel",
        "FlaxDistilBertPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_distilbert import (
        DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        DistilBertConfig,
        DistilBertOnnxConfig,
    )
    from .tokenization_distilbert import DistilBertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_distilbert_fast import DistilBertTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_distilbert import (
            DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            DistilBertForMaskedLM,
            DistilBertForMultipleChoice,
            DistilBertForQuestionAnswering,
            DistilBertForSequenceClassification,
            DistilBertForTokenClassification,
            DistilBertModel,
            DistilBertPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_distilbert import (
            TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFDistilBertForMaskedLM,
            TFDistilBertForMultipleChoice,
            TFDistilBertForQuestionAnswering,
            TFDistilBertForSequenceClassification,
            TFDistilBertForTokenClassification,
            TFDistilBertMainLayer,
            TFDistilBertModel,
            TFDistilBertPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_distilbert import (
            FlaxDistilBertForMaskedLM,
            FlaxDistilBertForMultipleChoice,
            FlaxDistilBertForQuestionAnswering,
            FlaxDistilBertForSequenceClassification,
            FlaxDistilBertForTokenClassification,
            FlaxDistilBertModel,
            FlaxDistilBertPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy that imports on attribute access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
lowerCAmelCase : str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : List[Any] = ["""GPTSw3Tokenizer"""]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_swa import GPTSwaTokenizer
else:
import sys
lowerCAmelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
# --- file boundary (dataset extraction artifact) ---
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase : List[str] = logging.get_logger(__name__)
lowerCAmelCase : Union[str, Any] = {"""tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase : Tuple = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class __magic_name__ ( UpperCAmelCase__ ):
    '''Bloom fast tokenizer (backed by HuggingFace `tokenizers`).

    NOTE(review): the base class `UpperCAmelCase__` and the class attributes
    VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP are undefined names in this
    file -- presumably PreTrainedTokenizerFast and the renamed module
    constants; confirm before use. The duplicated `_a` parameter names in
    `__init__` are a SyntaxError introduced by renaming.
    '''
    __UpperCamelCase = VOCAB_FILES_NAMES
    __UpperCamelCase = PRETRAINED_VOCAB_FILES_MAP
    __UpperCamelCase = ["input_ids", "attention_mask"]
    # No slow tokenizer counterpart exists for Bloom.
    __UpperCamelCase = None
    def __init__( self , _a=None , _a=None , _a=None , _a="<unk>" , _a="<s>" , _a="</s>" , _a="<pad>" , _a=False , _a=False , **_a , ):
        """Build the tokenizer and sync the pre-tokenizer's add_prefix_space flag."""
        super().__init__(
            _a , _a , tokenizer_file=_a , unk_token=_a , bos_token=_a , eos_token=_a , pad_token=_a , add_prefix_space=_a , clean_up_tokenization_spaces=_a , **_a , )
        # Rebuild the backend pre-tokenizer if its serialized state disagrees
        # with the requested add_prefix_space setting.
        lowerCamelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("""add_prefix_space""" , _a ) != add_prefix_space:
            lowerCamelCase = getattr(_a , pre_tok_state.pop("""type""" ) )
            lowerCamelCase = add_prefix_space
            lowerCamelCase = pre_tok_class(**_a )
        lowerCamelCase = add_prefix_space
    def _lowerCAmelCase ( self , *_a , **_a ):
        """Batch-encode; pretokenized input requires add_prefix_space=True."""
        lowerCamelCase = kwargs.get("""is_split_into_words""" , _a )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._batch_encode_plus(*_a , **_a )
    def _lowerCAmelCase ( self , *_a , **_a ):
        """Encode a single example; same add_prefix_space constraint as above."""
        lowerCamelCase = kwargs.get("""is_split_into_words""" , _a )
        if not (self.add_prefix_space or not is_split_into_words):
            raise Exception(
                f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with'
                """ pretokenized inputs.""" )
        return super()._encode_plus(*_a , **_a )
    def _lowerCAmelCase ( self , _a , _a = None ):
        """Save the backend tokenizer model files; returns the written paths."""
        lowerCamelCase = self._tokenizer.model.save(_a , name=_a )
        return tuple(_a )
    def _lowerCAmelCase ( self , _a ):
        """Flatten a Conversation into input ids, truncated to model_max_length."""
        lowerCamelCase = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(_a , add_special_tokens=_a ) + [self.eos_token_id] )
        # Keep only the most recent tokens when the history is too long.
        if len(_a ) > self.model_max_length:
            lowerCamelCase = input_ids[-self.model_max_length :]
        return input_ids
# --- file boundary (dataset extraction artifact) ---
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def lowerCAmelCase_(idx: int) -> list:
    """Return (HF key, original key) rename pairs for the stage-*idx* patch embedding.

    Fixed: the parameter was named ``__A`` while the body used ``idx``
    (NameError), and the return annotation wrongly said ``int``.
    """
    embed = []
    for hf_suffix, orig_suffix in (
        ("projection.weight", "proj.weight"),
        ("projection.bias", "proj.bias"),
        ("normalization.weight", "norm.weight"),
        ("normalization.bias", "norm.bias"),
    ):
        embed.append(
            (
                f"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.{hf_suffix}",
                f"stage{idx}.patch_embed.{orig_suffix}",
            )
        )
    return embed
def lowerCAmelCase_(idx: int, cnt: int) -> list:
    """Return (HF key, original key) rename pairs for block *cnt* of stage *idx*.

    Covers the q/k/v depthwise conv projections (conv weight + 5 batch-norm
    tensors each), the q/k/v linear projections, the attention output
    projection, the two MLP layers and both layer norms -- 34 pairs total,
    in the same order as the original hand-unrolled list.

    Fixed: the original signature declared ``__A`` twice (SyntaxError) while
    the body used ``idx``/``cnt``.
    """
    attention_weights = []
    hf_attn = f"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention"
    orig_attn = f"stage{idx}.blocks.{cnt}.attn"
    bn_params = ("weight", "bias", "running_mean", "running_var", "num_batches_tracked")
    # Depthwise conv projections for query/key/value: conv weight then BN state.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        attention_weights.append(
            (
                f"{hf_attn}.convolution_projection_{name}.convolution_projection.convolution.weight",
                f"{orig_attn}.conv_proj_{short}.conv.weight",
            )
        )
        for param in bn_params:
            attention_weights.append(
                (
                    f"{hf_attn}.convolution_projection_{name}.convolution_projection.normalization.{param}",
                    f"{orig_attn}.conv_proj_{short}.bn.{param}",
                )
            )
    # Linear q/k/v projections.
    for name, short in (("query", "q"), ("key", "k"), ("value", "v")):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_attn}.projection_{name}.{param}", f"{orig_attn}.proj_{short}.{param}")
            )
    hf_layer = f"cvt.encoder.stages.{idx}.layers.{cnt}"
    orig_layer = f"stage{idx}.blocks.{cnt}"
    # Attention output projection.
    for param in ("weight", "bias"):
        attention_weights.append(
            (f"{hf_layer}.attention.output.dense.{param}", f"{orig_layer}.attn.proj.{param}")
        )
    # MLP and layer norms.
    for hf_mod, orig_mod in (
        ("intermediate.dense", "mlp.fc1"),
        ("output.dense", "mlp.fc2"),
        ("layernorm_before", "norm1"),
        ("layernorm_after", "norm2"),
    ):
        for param in ("weight", "bias"):
            attention_weights.append(
                (f"{hf_layer}.{hf_mod}.{param}", f"{orig_layer}.{orig_mod}.{param}")
            )
    return attention_weights
def lowerCAmelCase_(idx: int) -> list:
    """Return the rename pair for the stage-*idx* cls token.

    The original checkpoint only stores a cls token on its last stage
    ("stage2"), hence the hard-coded right-hand key.

    Fixed: the parameter was named ``__A`` while the body used ``idx``
    (NameError), and the return annotation wrongly said ``Optional[int]``.
    """
    token = []
    token.append((f"cvt.encoder.stages.{idx}.cls_token", "stage2.cls_token"))
    return token
def lowerCAmelCase_() -> list:
    """Return (HF key, original key) rename pairs for the final norm + classifier head.

    Fixed: the return annotation wrongly said ``int`` for a function that
    returns a list of tuples.
    """
    head = [
        ("layernorm.weight", "norm.weight"),
        ("layernorm.bias", "norm.bias"),
        ("classifier.weight", "head.weight"),
        ("classifier.bias", "head.bias"),
    ]
    return head
def lowerCAmelCase_(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert an original CvT checkpoint into a HF CvtForImageClassification.

    Args:
        cvt_model: model name (e.g. "cvt-13"); the depth variant is parsed
            from characters [4:6] of the basename.
        image_size: input image size to record on the image processor.
        cvt_file_name: path to the original ``.pth`` checkpoint.
        pytorch_dump_folder_path: output directory for model + processor.

    Fixed: the original signature declared ``__A`` four times (SyntaxError),
    intermediate results were assigned to one throwaway variable, and
    ``CvtConfig`` was called with misspelled kwargs (idalabel/labelaid
    instead of id2label/label2id).

    NOTE(review): the mapping loop calls ``cls_token``/``embeddings``/
    ``attention``/``final``; in this file those helpers appear under
    obfuscated names -- confirm the helper names line up before running.
    """
    img_labels_file = "imagenet-1k-id2label.json"
    num_labels = 1_000
    repo_id = "huggingface/label-files"
    idalabel = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type="dataset")), "r"))
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}
    config = CvtConfig(num_labels=num_labels, id2label=idalabel, label2id=labelaid)
    # Depth per stage is encoded in the model name: 13 = 1+2+10, 21 = 1+4+16;
    # anything else is the wide w24 variant (2+2+20) with larger widths/heads.
    if cvt_model.rsplit("/", 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    elif cvt_model.rsplit("/", 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1_024]
    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k")
    image_processor.size["shortest_edge"] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device("cpu"))
    # Build the full old-key -> new-key mapping, then copy tensors across.
    huggingface_weights = OrderedDict()
    list_of_state_dict = []
    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()
    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]
    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    # CLI entry point: parse conversion arguments and run the converter.
    # NOTE(review): `convert_cvt_checkpoint` is not defined in this file --
    # the conversion function appears under an obfuscated name; confirm the
    # names line up before running.
    UpperCamelCase__ = argparse.ArgumentParser()
    parser.add_argument(
        '--cvt_model',
        default='cvt-w24',
        type=str,
        help='Name of the cvt model you\'d like to convert.',
    )
    parser.add_argument(
        '--image_size',
        default=3_8_4,
        type=int,
        help='Input Image Size',
    )
    parser.add_argument(
        '--cvt_file_name',
        default=R'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Input Image Size',
    )
    parser.add_argument(
        '--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
    )
    UpperCamelCase__ = parser.parse_args()
    convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger and vocabulary-file constants for the Speech2Text2 tokenizer.
# NOTE(review): all module constants were renamed to the same identifier by
# obfuscation; the class below references the original constant names.
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {
    'vocab_file': 'vocab.json',
    'tokenizer_config_file': 'tokenizer_config.json',
    'merges_file': 'merges.txt',
}
# Hosted files for the published en-de checkpoint.
UpperCamelCase__ = {
    'vocab_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
        ),
    },
    'tokenizer_config_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
        ),
    },
    'merges_file': {
        'facebook/s2t-wav2vec2-large-en-de': (
            'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
        ),
    },
}
# BPE end-of-word marker and the merge marker used in decoded text.
UpperCamelCase__ = '</w>'
UpperCamelCase__ = '@@ '
def lowerCAmelCase_(word) -> set:
    """Return the set of adjacent symbol pairs in *word*.

    *word* may be any sequence of symbols (a string, or a tuple of string
    tokens during BPE merging). The result drives the BPE merge loop.

    Fixed: the parameter was named ``__A`` while the body used ``word``
    (NameError), and the return annotation wrongly said ``str``.
    """
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
# (the value below is the positional-embedding size for the published checkpoint)
UpperCamelCase__ = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class A ( UpperCAmelCase_ ):
    """Speech2Text2 BPE tokenizer (decode-only when no merges file is given).

    NOTE(review): obfuscation broke this class in several places -- the
    duplicated `__UpperCAmelCase` parameter names in `__init__` and
    `save_vocabulary` are SyntaxErrors; `merges_file`, `string`, `kv`,
    `index` and `merges_file` are referenced but never bound; and all
    methods share the name `lowercase_`, so only the last definition
    survives. Confirm against the upstream Speech2Text2Tokenizer.
    """
    __UpperCAmelCase : str = VOCAB_FILES_NAMES
    __UpperCAmelCase : str = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : Dict = ['input_ids', 'attention_mask']
    def __init__(self : Tuple , __UpperCAmelCase : List[Any] , __UpperCAmelCase : Dict="<s>" , __UpperCAmelCase : Tuple="<pad>" , __UpperCAmelCase : str="</s>" , __UpperCAmelCase : int="<unk>" , __UpperCAmelCase : List[str]=False , __UpperCAmelCase : str=None , **__UpperCAmelCase : Optional[Any] , ) -> Tuple:
        """Load the vocab json; merges are optional (decode-only without them)."""
        super().__init__(
            unk_token=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , **__UpperCAmelCase , )
        UpperCAmelCase__ = do_lower_case
        with open(__UpperCAmelCase , encoding="utf-8" ) as vocab_handle:
            UpperCAmelCase__ = json.load(__UpperCAmelCase )
        UpperCAmelCase__ = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            # Without merges the tokenizer cannot encode, only decode ids.
            logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
            UpperCAmelCase__ = None
            UpperCAmelCase__ = None
        else:
            with open(__UpperCAmelCase , encoding="utf-8" ) as merges_handle:
                UpperCAmelCase__ = merges_handle.read().split("\n" )[:-1]
            UpperCAmelCase__ = [tuple(merge.split()[:2] ) for merge in merges]
            # Rank of each merge pair: lower rank = merged earlier.
            UpperCAmelCase__ = dict(zip(__UpperCAmelCase , range(len(__UpperCAmelCase ) ) ) )
            UpperCAmelCase__ = {}
    @property
    def lowercase_ (self : List[str] ) -> int:
        """Size of the vocabulary."""
        return len(self.decoder )
    def lowercase_ (self : Union[str, Any] ) -> Dict:
        """Return the full vocab including added tokens."""
        return dict(self.encoder , **self.added_tokens_encoder )
    def lowercase_ (self : Dict , __UpperCAmelCase : Union[str, Any] ) -> str:
        """Apply BPE merges to a single token (with end-of-word marker)."""
        UpperCAmelCase__ = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
        if not pairs:
            return token
        while True:
            # Merge the lowest-ranked pair until no known pair remains.
            UpperCAmelCase__ = min(__UpperCAmelCase , key=lambda __UpperCAmelCase : self.bpe_ranks.get(__UpperCAmelCase , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            UpperCAmelCase__ , UpperCAmelCase__ = bigram
            UpperCAmelCase__ = []
            UpperCAmelCase__ = 0
            while i < len(__UpperCAmelCase ):
                try:
                    UpperCAmelCase__ = word.index(__UpperCAmelCase , __UpperCAmelCase )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    UpperCAmelCase__ = j
                if word[i] == first and i < len(__UpperCAmelCase ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            UpperCAmelCase__ = tuple(__UpperCAmelCase )
            UpperCAmelCase__ = new_word
            if len(__UpperCAmelCase ) == 1:
                break
            else:
                UpperCAmelCase__ = get_pairs(__UpperCAmelCase )
        UpperCAmelCase__ = " ".join(__UpperCAmelCase )
        # Special-case newline + merge marker produced by the join above.
        if word == "\n " + BPE_TOKEN_MERGES:
            UpperCAmelCase__ = "\n" + BPE_TOKEN_MERGES
        if word.endswith(__UpperCAmelCase ):
            UpperCAmelCase__ = word.replace(__UpperCAmelCase , "" )
        UpperCAmelCase__ = word.replace(" " , __UpperCAmelCase )
        UpperCAmelCase__ = word
        return word
    def lowercase_ (self : Tuple , __UpperCAmelCase : int ) -> Optional[int]:
        """Split text on whitespace and BPE-encode each token (requires merges)."""
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            UpperCAmelCase__ = text.lower()
        UpperCAmelCase__ = text.split()
        UpperCAmelCase__ = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(__UpperCAmelCase ).split(" " ) ) )
        return split_tokens
    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str ) -> int:
        """Map a token string to its id (unk on miss)."""
        return self.encoder.get(__UpperCAmelCase , self.encoder.get(self.unk_token ) )
    def lowercase_ (self : Any , __UpperCAmelCase : int ) -> str:
        """Map an id back to its token string (unk on miss)."""
        UpperCAmelCase__ = self.decoder.get(__UpperCAmelCase , self.unk_token )
        return result
    def lowercase_ (self : Dict , __UpperCAmelCase : List[str] ) -> str:
        """Join tokens into a string, concatenating '@@ ' BPE continuations."""
        UpperCAmelCase__ = " ".join(__UpperCAmelCase )
        # make sure @@ tokens are concatenated
        UpperCAmelCase__ = "".join(string.split(__UpperCAmelCase ) )
        return string
    def lowercase_ (self : Union[str, Any] , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Write vocab.json (and merges.txt when available) to save_directory."""
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase__ = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        UpperCAmelCase__ = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCAmelCase , ensure_ascii=__UpperCAmelCase ) + "\n" )
        UpperCAmelCase__ = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(__UpperCAmelCase , "w" , encoding="utf-8" ) as writer:
            # Merges are written in rank order; warn if ranks are not contiguous.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCAmelCase : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
                        " Please check that the tokenizer is not corrupted!" )
                    UpperCAmelCase__ = token_index
                writer.write(" ".join(__UpperCAmelCase ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
# --- file boundary (dataset extraction artifact) ---
'''simple docstring'''
def __lowerCAmelCase(grid, row, col, visit):
    """Count simple paths from (row, col) to the bottom-right cell of *grid*.

    Moves in the four cardinal directions; cells containing 1 are walls, and
    *visit* holds the cells already on the current path so none is reused
    (backtracking removes them again).

    Args:
        grid: rectangular matrix of 0 (open) / 1 (blocked) cells.
        row, col: current position.
        visit: set of (row, col) pairs on the current path; pass ``set()``.

    Returns:
        Number of distinct non-self-intersecting paths to the goal.

    Fixed: the original signature declared the same parameter name four
    times (SyntaxError) while the body used grid/row/col/visit.
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the path, or a wall: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col))
    count = 0
    count += __lowerCAmelCase(grid, row + 1, col, visit)
    count += __lowerCAmelCase(grid, row - 1, col, visit)
    count += __lowerCAmelCase(grid, row, col + 1, visit)
    count += __lowerCAmelCase(grid, row, col - 1, visit)
    visit.remove((row, col))
    return count
if __name__ == "__main__":
    import doctest
    # Run any doctests in this module when executed directly.
    doctest.testmod()
# --- file boundary (dataset extraction artifact) ---
'''simple docstring'''
from collections.abc import Sequence
def __lowerCAmelCase(poly, x):
    """Evaluate polynomial *poly* (coefficients in ascending power order) at *x*
    by direct summation of ``c_i * x**i``.

    Fixed: the original signature declared the same parameter name twice
    (SyntaxError) while the body used ``x`` and the coefficient sequence.
    """
    return sum(c * (x**i) for i, c in enumerate(poly))
def __lowerCAmelCase(poly, x):
    """Evaluate polynomial *poly* (ascending coefficient order) at *x* using
    Horner's rule -- same contract as the direct-sum evaluator but with only
    ``len(poly)`` multiplications.

    Fixed: the original signature declared the same parameter name twice
    (SyntaxError) while the body used the coefficient sequence and ``x``.
    """
    result = 0.0
    for coeff in reversed(poly):
        result = result * x + coeff
    return result
if __name__ == "__main__":
    # Demo: evaluate the same polynomial both ways; results should match.
    # NOTE(review): `evaluate_poly` / `horner` are not defined in this file --
    # both functions above were renamed to the same obfuscated identifier;
    # confirm the names before running.
    lowerCamelCase__ = (0.0, 0.0, 5.0, 9.3, 7.0)
    lowerCamelCase__ = 10.0
    print(evaluate_poly(poly, x))
    print(horner(poly, x))
# --- file boundary (dataset extraction artifact) ---
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
# The slow tokenizer needs sentencepiece; fall back to None when missing.
if is_sentencepiece_available():
    from .tokenization_xlnet import XLNetTokenizer
else:
    UpperCamelCase__ = None
# NOTE(review): every constant below is assigned to the same obfuscated name,
# so each assignment overwrites the previous one; the class below references
# the original constant names (VOCAB_FILES_NAMES etc.), which are undefined
# here. Confirm against the upstream xlnet tokenizer module.
UpperCamelCase__ = logging.get_logger(__name__)
UpperCamelCase__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
UpperCamelCase__ = {
    'vocab_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model',
    },
    'tokenizer_file': {
        'xlnet-base-cased': 'https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json',
        'xlnet-large-cased': 'https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json',
    },
}
UpperCamelCase__ = {
    'xlnet-base-cased': None,
    'xlnet-large-cased': None,
}
# SentencePiece underline marker.
UpperCamelCase__ = '▁'
# Segments (not really needed)
UpperCamelCase__ = 0
UpperCamelCase__ = 1
UpperCamelCase__ = 2
UpperCamelCase__ = 3
UpperCamelCase__ = 4
class A ( UpperCAmelCase_ ):
    """XLNet fast tokenizer (padding side 'left', XLNet special-token layout).

    NOTE(review): the base class `UpperCAmelCase_` and the VOCAB_FILES_NAMES /
    PRETRAINED_* attributes are undefined names in this file, and the
    duplicated `__UpperCAmelCase` parameter names in `__init__` are a
    SyntaxError -- all obfuscation artifacts; confirm against the upstream
    XLNetTokenizerFast.
    """
    __UpperCAmelCase : int = VOCAB_FILES_NAMES
    __UpperCAmelCase : List[str] = PRETRAINED_VOCAB_FILES_MAP
    __UpperCAmelCase : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    __UpperCAmelCase : str = 'left'
    __UpperCAmelCase : List[str] = XLNetTokenizer
    def __init__(self : Union[str, Any] , __UpperCAmelCase : Optional[int]=None , __UpperCAmelCase : List[str]=None , __UpperCAmelCase : List[Any]=False , __UpperCAmelCase : Union[str, Any]=True , __UpperCAmelCase : Union[str, Any]=False , __UpperCAmelCase : str="<s>" , __UpperCAmelCase : Tuple="</s>" , __UpperCAmelCase : List[Any]="<unk>" , __UpperCAmelCase : int="<sep>" , __UpperCAmelCase : Optional[Any]="<pad>" , __UpperCAmelCase : str="<cls>" , __UpperCAmelCase : Union[str, Any]="<mask>" , __UpperCAmelCase : Optional[int]=["<eop>", "<eod>"] , **__UpperCAmelCase : Any , ) -> Any:
        """Wrap the mask token so it keeps leading spaces, then init the base."""
        UpperCAmelCase__ = AddedToken(__UpperCAmelCase , lstrip=__UpperCAmelCase , rstrip=__UpperCAmelCase ) if isinstance(__UpperCAmelCase , __UpperCAmelCase ) else mask_token
        super().__init__(
            vocab_file=__UpperCAmelCase , tokenizer_file=__UpperCAmelCase , do_lower_case=__UpperCAmelCase , remove_space=__UpperCAmelCase , keep_accents=__UpperCAmelCase , bos_token=__UpperCAmelCase , eos_token=__UpperCAmelCase , unk_token=__UpperCAmelCase , sep_token=__UpperCAmelCase , pad_token=__UpperCAmelCase , cls_token=__UpperCAmelCase , mask_token=__UpperCAmelCase , additional_special_tokens=__UpperCAmelCase , **__UpperCAmelCase , )
        # XLNet uses segment id 3 for padding.
        UpperCAmelCase__ = 3
        UpperCAmelCase__ = do_lower_case
        UpperCAmelCase__ = remove_space
        UpperCAmelCase__ = keep_accents
        UpperCAmelCase__ = vocab_file
        UpperCAmelCase__ = False if not self.vocab_file else True
    def lowercase_ (self : Optional[Any] , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """XLNet layout: A + <sep> [+ B + <sep>] + <cls> (cls at the end)."""
        UpperCAmelCase__ = [self.sep_token_id]
        UpperCAmelCase__ = [self.cls_token_id]
        if token_ids_a is None:
            return token_ids_a + sep + cls
        return token_ids_a + sep + token_ids_a + sep + cls
    def lowercase_ (self : str , __UpperCAmelCase : List[int] , __UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
        """Segment ids: 0 for A, 1 for B, 2 for the trailing cls token."""
        UpperCAmelCase__ = [self.sep_token_id]
        UpperCAmelCase__ = [2]
        if token_ids_a is None:
            return len(token_ids_a + sep ) * [0] + cls_segment_id
        return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
    def lowercase_ (self : int , __UpperCAmelCase : str , __UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
        """Copy the sentencepiece model into save_directory (slow-vocab save)."""
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer." )
        if not os.path.isdir(__UpperCAmelCase ):
            logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
            return
        UpperCAmelCase__ = os.path.join(
            __UpperCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCAmelCase ):
            copyfile(self.vocab_file , __UpperCAmelCase )
        return (out_vocab_file,)
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def lowerCAmelCase_(state_dict) -> None:
    """Drop fairseq-only bookkeeping keys from *state_dict* in place.

    Missing keys are ignored (``pop`` with a default) so the function is
    safe on checkpoints that lack some of them.

    Fixed: the parameter was named ``__A`` while the body popped from
    ``state_dict`` with nonsensical arguments (``pop(__A, __A)``).
    """
    ignore_keys = [
        "encoder.version",
        "decoder.version",
        "model.encoder.version",
        "model.decoder.version",
        "_float_tensor",
        "decoder.output_projection.weight",
    ]
    for k in ignore_keys:
        state_dict.pop(k, None)
def lowerCAmelCase_(emb):
    """Build an ``nn.Linear`` whose weight tensor is shared with embedding *emb*.

    Used to tie the LM head to the shared token embedding. Bias is disabled
    because fairseq's output projection has none; assigning ``.data``
    rebinds the tensor, so the linear layer literally shares storage with
    the embedding.

    Fixed: the original called ``nn.Linear(__A, __A, bias=__A)`` with the
    embedding module itself as every argument.
    """
    vocab_size, emb_size = emb.weight.shape
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = emb.weight.data
    return lin_layer
def lowerCAmelCase_(checkpoint_path, hf_config_path="facebook/mbart-large-en-ro", finetuned=False, mbart_aa=False):
    """Load a fairseq mBART checkpoint and return it as a HF
    MBartForConditionalGeneration.

    Args:
        checkpoint_path: path to the fairseq ``model.pt`` file.
        hf_config_path: hub id / path of the target architecture config.
        finetuned: if True, tie an LM head to the shared embedding.
        mbart_aa: mBART-50 flag; combined with *finetuned* it switches the
            activation to ReLU (those checkpoints use ReLU, not GELU).

    Fixed: the original signature declared ``__A`` four times (SyntaxError)
    and assigned every intermediate to one throwaway variable.

    NOTE(review): calls ``remove_ignore_keys_`` / ``make_linear_from_emb``;
    in this file those helpers appear under obfuscated names -- confirm the
    names line up before running.
    """
    state_dict = torch.load(checkpoint_path, map_location="cpu")["model"]
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict["encoder.embed_tokens.weight"].shape[0]
    mbart_config = MBartConfig.from_pretrained(hf_config_path, vocab_size=vocab_size)
    if mbart_aa and finetuned:
        mbart_config.activation_function = "relu"
    # fairseq keeps the tied embedding only under decoder.embed_tokens.
    state_dict["shared.weight"] = state_dict["decoder.embed_tokens.weight"]
    model = MBartForConditionalGeneration(mbart_config)
    model.model.load_state_dict(state_dict)
    if finetuned:
        model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    # CLI entry point for the conversion.
    # NOTE(review): `convert_fairseq_mbart_checkpoint_from_disk` is not defined
    # in this file (the conversion function appears under an obfuscated name),
    # and `args.mbart_aa` does not match the declared `--mbart_50` option
    # (argparse would expose it as `args.mbart_50`) -- confirm both.
    UpperCamelCase__ = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        'fairseq_path', type=str, help='bart.large, bart.large.cnn or a path to a model.pt on local filesystem.'
    )
    parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
    parser.add_argument(
        '--hf_config',
        default='facebook/mbart-large-cc25',
        type=str,
        help='Which huggingface architecture to use: mbart-large',
    )
    parser.add_argument('--mbart_50', action='store_true', help='whether the model is mMART-50 checkpoint')
    parser.add_argument('--finetuned', action='store_true', help='whether the model is a fine-tuned checkpoint')
    UpperCamelCase__ = parser.parse_args()
    UpperCamelCase__ = convert_fairseq_mbart_checkpoint_from_disk(
        args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
    )
    model.save_pretrained(args.pytorch_dump_folder_path)
# --- file boundary (dataset extraction artifact) ---
'''simple docstring'''
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowercase :
    """Config/input factory for TFBlipTextModel tests.

    NOTE(review): the duplicated `UpperCamelCase_` parameter names in
    `__init__` are a SyntaxError, and the body assigns every field to one
    throwaway local instead of `self.*` attributes -- obfuscation
    artifacts; confirm against the upstream BlipTextModelTester.
    """
    def __init__( self , UpperCamelCase_ , UpperCamelCase_=12 , UpperCamelCase_=7 , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=True , UpperCamelCase_=99 , UpperCamelCase_=32 , UpperCamelCase_=32 , UpperCamelCase_=2 , UpperCamelCase_=4 , UpperCamelCase_=37 , UpperCamelCase_=0.1 , UpperCamelCase_=0.1 , UpperCamelCase_=512 , UpperCamelCase_=0.02 , UpperCamelCase_=0 , UpperCamelCase_=None , ):
        """Record every model hyperparameter for later config construction."""
        UpperCamelCase__ :str = parent
        UpperCamelCase__ :Any = batch_size
        UpperCamelCase__ :Tuple = seq_length
        UpperCamelCase__ :int = is_training
        UpperCamelCase__ :Optional[int] = use_input_mask
        UpperCamelCase__ :Any = use_labels
        UpperCamelCase__ :Optional[int] = vocab_size
        UpperCamelCase__ :int = hidden_size
        UpperCamelCase__ :Optional[int] = projection_dim
        UpperCamelCase__ :str = num_hidden_layers
        UpperCamelCase__ :Union[str, Any] = num_attention_heads
        UpperCamelCase__ :Any = intermediate_size
        UpperCamelCase__ :int = dropout
        UpperCamelCase__ :Tuple = attention_dropout
        UpperCamelCase__ :List[str] = max_position_embeddings
        UpperCamelCase__ :Optional[int] = initializer_range
        UpperCamelCase__ :Optional[Any] = scope
        UpperCamelCase__ :List[str] = bos_token_id
    def lowerCAmelCase__ ( self ):
        """Build random input ids plus an attention mask with a random prefix of 1s per row."""
        UpperCamelCase__ :List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        UpperCamelCase__ :List[str] = None
        if self.use_input_mask:
            UpperCamelCase__ :List[str] = random_attention_mask([self.batch_size, self.seq_length] )
        if input_mask is not None:
            UpperCamelCase__ :Optional[int] = input_mask.numpy()
            UpperCamelCase__ , UpperCamelCase__ :str = input_mask.shape
            # Each row keeps a random-length prefix of attended tokens.
            UpperCamelCase__ :Optional[Any] = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
            for batch_idx, start_index in enumerate(UpperCamelCase_ ):
                UpperCamelCase__ :Any = 1
                UpperCamelCase__ :List[str] = 0
        UpperCamelCase__ :int = self.get_config()
        return config, input_ids, tf.convert_to_tensor(UpperCamelCase_ )
    def lowerCAmelCase__ ( self ):
        """Construct a BlipTextConfig from the stored hyperparameters."""
        return BlipTextConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
    def lowerCAmelCase__ ( self , UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ):
        """Run the TF model with and without the mask and check output shapes."""
        UpperCamelCase__ :Tuple = TFBlipTextModel(config=UpperCamelCase_ )
        UpperCamelCase__ :Optional[int] = model(UpperCamelCase_ , attention_mask=UpperCamelCase_ , training=UpperCamelCase_ )
        UpperCamelCase__ :Any = model(UpperCamelCase_ , training=UpperCamelCase_ )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
    def lowerCAmelCase__ ( self ):
        """Return (config, inputs_dict) in the shape the common test mixin expects."""
        UpperCamelCase__ :Optional[int] = self.prepare_config_and_inputs()
        UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ :Any = config_and_inputs
        UpperCamelCase__ :Tuple = {'''input_ids''': input_ids, '''attention_mask''': input_mask}
        return config, inputs_dict
@require_tf
class lowercase ( A__ , unittest.TestCase ):
"""simple docstring"""
_a = (TFBlipTextModel,) if is_tf_available() else ()
_a = False
_a = False
_a = False
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = BlipTextModelTester(self )
UpperCamelCase__ :Dict = ConfigTester(self , config_class=UpperCamelCase_ , hidden_size=37 )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
self.config_tester.run_common_tests()
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase_ )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''Blip does not use inputs_embeds''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='''BlipTextModel has no base class and is not available in MODEL_MAPPING''' )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
pass
@slow
def lowerCAmelCase__ ( self ):
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCamelCase__ :Optional[int] = TFBlipTextModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
def lowerCAmelCase__ ( self , UpperCamelCase_=True ):
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=UpperCamelCase_ ) | 219 |
'''simple docstring'''
from __future__ import annotations
import math
def a ( __a ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__a ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def a ( __a ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :List[Any] = str(__a )
UpperCamelCase__ :Dict = [n]
for i in range(1 , len(__a ) ):
list_nums.append(int(str_num[i:] ) )
list_nums.append(int(str_num[:-i] ) )
return list_nums
def a ( __a ) -> bool:
'''simple docstring'''
if len(str(__a ) ) > 3:
if not is_prime(int(str(__a )[-3:] ) ) or not is_prime(int(str(__a )[:3] ) ):
return False
return True
def a ( __a = 11 ) -> list[int]:
'''simple docstring'''
UpperCamelCase__ :list[int] = []
UpperCamelCase__ :int = 13
while len(__a ) != count:
if validate(__a ):
UpperCamelCase__ :Optional[int] = list_truncated_nums(__a )
if all(is_prime(__a ) for i in list_nums ):
list_truncated_primes.append(__a )
num += 2
return list_truncated_primes
def a ( ) -> int:
'''simple docstring'''
return sum(compute_truncated_primes(11 ) )
if __name__ == "__main__":
print(F"""{sum(compute_truncated_primes(11)) = }""") | 219 | 1 |
"""simple docstring"""
class A_ :
"""simple docstring"""
def __init__( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : Tuple =''
lowerCamelCase__ : Dict =''
lowerCamelCase__ : Optional[int] =[]
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :int , lowerCamelCase_ :int ):
"""simple docstring"""
if m == -1:
return n + 1
elif n == -1:
return m + 1
elif self.dp[m][n] > -1:
return self.dp[m][n]
else:
if self.worda[m] == self.worda[n]:
lowerCamelCase__ : List[str] =self.__min_dist_top_down_dp(m - 1 , n - 1 )
else:
lowerCamelCase__ : List[str] =self.__min_dist_top_down_dp(lowerCamelCase_ , n - 1 )
lowerCamelCase__ : Optional[int] =self.__min_dist_top_down_dp(m - 1 , lowerCamelCase_ )
lowerCamelCase__ : Dict =self.__min_dist_top_down_dp(m - 1 , n - 1 )
lowerCamelCase__ : Optional[int] =1 + min(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self.dp[m][n]
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :str , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : str =worda
lowerCamelCase__ : Any =worda
lowerCamelCase__ : Optional[Any] =[[-1 for _ in range(len(lowerCamelCase_ ) )] for _ in range(len(lowerCamelCase_ ) )]
return self.__min_dist_top_down_dp(len(lowerCamelCase_ ) - 1 , len(lowerCamelCase_ ) - 1 )
def UpperCAmelCase__ ( self :Union[str, Any] , lowerCamelCase_ :str , lowerCamelCase_ :str ):
"""simple docstring"""
lowerCamelCase__ : str =worda
lowerCamelCase__ : Union[str, Any] =worda
lowerCamelCase__ : Optional[int] =len(lowerCamelCase_ )
lowerCamelCase__ : Any =len(lowerCamelCase_ )
lowerCamelCase__ : List[Any] =[[0 for _ in range(n + 1 )] for _ in range(m + 1 )]
for i in range(m + 1 ):
for j in range(n + 1 ):
if i == 0: # first string is empty
lowerCamelCase__ : List[str] =j
elif j == 0: # second string is empty
lowerCamelCase__ : Tuple =i
elif worda[i - 1] == worda[j - 1]: # last characters are equal
lowerCamelCase__ : List[str] =self.dp[i - 1][j - 1]
else:
lowerCamelCase__ : Any =self.dp[i][j - 1]
lowerCamelCase__ : Any =self.dp[i - 1][j]
lowerCamelCase__ : Optional[int] =self.dp[i - 1][j - 1]
lowerCamelCase__ : Optional[Any] =1 + min(lowerCamelCase_ , lowerCamelCase_ , lowerCamelCase_ )
return self.dp[m][n]
if __name__ == "__main__":
lowerCAmelCase = EditDistance()
print("""****************** Testing Edit Distance DP Algorithm ******************""")
print()
lowerCAmelCase = input("""Enter the first string: """).strip()
lowerCAmelCase = input("""Enter the second string: """).strip()
print()
print(f"""The minimum edit distance is: {solver.min_dist_top_down(Sa, Sa)}""")
print(f"""The minimum edit distance is: {solver.min_dist_bottom_up(Sa, Sa)}""")
print()
print("""*************** End of Testing Edit Distance DP Algorithm ***************""") | 126 |
"""simple docstring"""
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class A_ :
"""simple docstring"""
def __init__( self :str , lowerCamelCase_ :int , lowerCamelCase_ :List[str]=13 , lowerCamelCase_ :List[Any]=7 , lowerCamelCase_ :str=True , lowerCamelCase_ :int=True , lowerCamelCase_ :List[str]=True , lowerCamelCase_ :Any=99 , lowerCamelCase_ :Optional[int]=32 , lowerCamelCase_ :Dict=5 , lowerCamelCase_ :Any=4 , lowerCamelCase_ :Tuple=37 , lowerCamelCase_ :Union[str, Any]="gelu" , lowerCamelCase_ :Dict=0.1 , lowerCamelCase_ :Any=0.1 , lowerCamelCase_ :List[str]=512 , lowerCamelCase_ :int=16 , lowerCamelCase_ :List[str]=2 , lowerCamelCase_ :List[str]=0.02 , lowerCamelCase_ :List[Any]=3 , lowerCamelCase_ :Dict=4 , lowerCamelCase_ :Optional[Any]=None , ):
"""simple docstring"""
lowerCamelCase__ : Any =parent
lowerCamelCase__ : Union[str, Any] =batch_size
lowerCamelCase__ : Dict =seq_length
lowerCamelCase__ : List[str] =is_training
lowerCamelCase__ : List[Any] =use_token_type_ids
lowerCamelCase__ : Union[str, Any] =use_labels
lowerCamelCase__ : Optional[Any] =vocab_size
lowerCamelCase__ : List[Any] =hidden_size
lowerCamelCase__ : Optional[int] =num_hidden_layers
lowerCamelCase__ : Tuple =num_attention_heads
lowerCamelCase__ : Optional[Any] =intermediate_size
lowerCamelCase__ : Optional[int] =hidden_act
lowerCamelCase__ : List[Any] =hidden_dropout_prob
lowerCamelCase__ : str =attention_probs_dropout_prob
lowerCamelCase__ : Tuple =max_position_embeddings
lowerCamelCase__ : Union[str, Any] =type_vocab_size
lowerCamelCase__ : Dict =type_sequence_label_size
lowerCamelCase__ : str =initializer_range
lowerCamelCase__ : Any =num_labels
lowerCamelCase__ : int =num_choices
lowerCamelCase__ : List[str] =scope
lowerCamelCase__ : List[str] =self.vocab_size - 1
def UpperCAmelCase__ ( self :Any ):
"""simple docstring"""
lowerCamelCase__ : str =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCamelCase__ : Union[str, Any] =None
if self.use_token_type_ids:
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCamelCase__ : Any =None
lowerCamelCase__ : Any =None
lowerCamelCase__ : str =None
if self.use_labels:
lowerCamelCase__ : int =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCamelCase__ : Any =ids_tensor([self.batch_size] , self.num_choices )
lowerCamelCase__ : int =OpenAIGPTConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , pad_token_id=self.pad_token_id , )
lowerCamelCase__ : List[str] =ids_tensor([self.num_hidden_layers, self.num_attention_heads] , 2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :List[str] , lowerCamelCase_ :Optional[int] , *lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : Any =OpenAIGPTModel(config=lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , head_mask=lowerCamelCase_ )
lowerCamelCase__ : Optional[int] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ )
lowerCamelCase__ : Union[str, Any] =model(lowerCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self :List[str] , lowerCamelCase_ :str , lowerCamelCase_ :Any , lowerCamelCase_ :Dict , lowerCamelCase_ :List[str] , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : int =OpenAIGPTLMHeadModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : int =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :Tuple , lowerCamelCase_ :List[str] , lowerCamelCase_ :Union[str, Any] , lowerCamelCase_ :List[str] , lowerCamelCase_ :str , *lowerCamelCase_ :Dict ):
"""simple docstring"""
lowerCamelCase__ : Tuple =OpenAIGPTDoubleHeadsModel(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[Any] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :int , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :str , *lowerCamelCase_ :List[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[str] =self.num_labels
lowerCamelCase__ : Tuple =OpenAIGPTForSequenceClassification(lowerCamelCase_ )
model.to(lowerCamelCase_ )
model.eval()
lowerCamelCase__ : Optional[int] =ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCamelCase__ : List[str] =model(lowerCamelCase_ , token_type_ids=lowerCamelCase_ , labels=lowerCamelCase_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
lowerCamelCase__ : str =self.prepare_config_and_inputs()
(
(
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) , (
lowerCamelCase__
) ,
) : Dict =config_and_inputs
lowerCamelCase__ : Tuple ={
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'head_mask': head_mask,
}
return config, inputs_dict
@require_torch
class A_ ( A__ , A__ , A__ , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
SCREAMING_SNAKE_CASE_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
SCREAMING_SNAKE_CASE_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def UpperCAmelCase__ ( self :int , lowerCamelCase_ :Optional[int] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[Any] , lowerCamelCase_ :List[str] ):
"""simple docstring"""
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :Tuple , lowerCamelCase_ :Optional[Any] , lowerCamelCase_ :List[Any]=False ):
"""simple docstring"""
lowerCamelCase__ : str =super()._prepare_for_class(lowerCamelCase_ , lowerCamelCase_ , return_labels=lowerCamelCase_ )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
lowerCamelCase__ : Dict =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) , dtype=torch.long , device=lowerCamelCase_ , )
lowerCamelCase__ : Union[str, Any] =inputs_dict['labels']
lowerCamelCase__ : Tuple =inputs_dict['labels']
lowerCamelCase__ : int =torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) , dtype=torch.long , device=lowerCamelCase_ , )
lowerCamelCase__ : Optional[Any] =torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=lowerCamelCase_ )
return inputs_dict
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : List[str] =OpenAIGPTModelTester(self )
lowerCamelCase__ : Union[str, Any] =ConfigTester(self , config_class=lowerCamelCase_ , n_embd=37 )
def UpperCAmelCase__ ( self :int ):
"""simple docstring"""
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : Union[str, Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :Union[str, Any] ):
"""simple docstring"""
lowerCamelCase__ : int =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*lowerCamelCase_ )
def UpperCAmelCase__ ( self :str ):
"""simple docstring"""
lowerCamelCase__ : str =self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*lowerCamelCase_ )
@slow
def UpperCAmelCase__ ( self :List[str] ):
"""simple docstring"""
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCamelCase__ : Optional[Any] =OpenAIGPTModel.from_pretrained(lowerCamelCase_ )
self.assertIsNotNone(lowerCamelCase_ )
@require_torch
class A_ ( unittest.TestCase ):
"""simple docstring"""
@slow
def UpperCAmelCase__ ( self :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =OpenAIGPTLMHeadModel.from_pretrained('openai-gpt' )
model.to(lowerCamelCase_ )
lowerCamelCase__ : List[str] =torch.tensor([[481, 4_735, 544]] , dtype=torch.long , device=lowerCamelCase_ ) # the president is
lowerCamelCase__ : List[Any] =[
481,
4_735,
544,
246,
963,
870,
762,
239,
244,
40_477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
lowerCamelCase__ : Tuple =model.generate(lowerCamelCase_ , do_sample=lowerCamelCase_ )
self.assertListEqual(output_ids[0].tolist() , lowerCamelCase_ ) | 126 | 1 |
'''simple docstring'''
lowerCamelCase__ = [sum(int(c, 10) ** 2 for c in i.__str__()) for i in range(100_000)]
def __lowerCAmelCase (__lowerCAmelCase ):
_UpperCAmelCase : Optional[Any] = 0
while number:
# Increased Speed Slightly by checking every 5 digits together.
sum_of_digits_squared += DIGITS_SQUARED[number % 100_000]
number //= 100_000
return sum_of_digits_squared
# There are 2 Chains made,
# One ends with 89 with the chain member 58 being the one which when declared first,
# there will be the least number of iterations for all the members to be checked.
# The other one ends with 1 and has only one element 1.
# So 58 and 1 are chosen to be declared at the starting.
# Changed dictionary to an array to quicken the solution
lowerCamelCase__ = [None] * 10_000_000
lowerCamelCase__ = True
lowerCamelCase__ = False
def __lowerCAmelCase (__lowerCAmelCase ):
if CHAINS[number - 1] is not None:
return CHAINS[number - 1] # type: ignore
_UpperCAmelCase : str = chain(next_number(__lowerCAmelCase ) )
_UpperCAmelCase : Tuple = number_chain
while number < 10_000_000:
_UpperCAmelCase : Union[str, Any] = number_chain
number *= 10
return number_chain
def __lowerCAmelCase (__lowerCAmelCase = 10_000_000 ):
for i in range(1 , __lowerCAmelCase ):
if CHAINS[i] is None:
chain(i + 1 )
return CHAINS[:number].count(__lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(F'''{solution() = }''')
| 322 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
lowerCamelCase__ = transforms.Compose(
[
transforms.Resize((256, 256)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def __lowerCAmelCase (__lowerCAmelCase ):
if isinstance(__lowerCAmelCase , torch.Tensor ):
return image
elif isinstance(__lowerCAmelCase , PIL.Image.Image ):
_UpperCAmelCase : int = [image]
_UpperCAmelCase : str = [trans(img.convert("RGB" ) ) for img in image]
_UpperCAmelCase : Optional[Any] = torch.stack(__lowerCAmelCase )
return image
class lowerCAmelCase__ ( UpperCAmelCase__ ):
def __init__( self : Tuple , lowerCamelCase__ : int , lowerCamelCase__ : int ) ->int:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase : Tuple = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=lowerCamelCase__ , scheduler=lowerCamelCase__ )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : str ) ->Union[str, Any]:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(F"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def lowerCAmelCase__ ( self : Dict , lowerCamelCase__ : Dict , lowerCamelCase__ : List[str] , lowerCamelCase__ : int ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase : str = min(int(num_inference_steps * strength ) , lowerCamelCase__ )
_UpperCAmelCase : str = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase : List[str] = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def lowerCAmelCase__ ( self : List[Any] , lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : str , lowerCamelCase__ : str , lowerCamelCase__ : Dict , lowerCamelCase__ : Optional[Any]=None ) ->str:
'''simple docstring'''
if not isinstance(lowerCamelCase__ , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}""" )
_UpperCAmelCase : Union[str, Any] = image.to(device=lowerCamelCase__ , dtype=lowerCamelCase__ )
if isinstance(lowerCamelCase__ , lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F"""You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch"""
F""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase : List[str] = init_latents.shape
_UpperCAmelCase : Optional[int] = randn_tensor(lowerCamelCase__ , generator=lowerCamelCase__ , device=lowerCamelCase__ , dtype=lowerCamelCase__ )
# get latents
print("add noise to latents at timestep" , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = self.scheduler.add_noise(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
_UpperCAmelCase : List[Any] = init_latents
return latents
@torch.no_grad()
def __call__( self : Any , lowerCamelCase__ : Union[torch.FloatTensor, PIL.Image.Image] = None , lowerCamelCase__ : float = 0.8 , lowerCamelCase__ : int = 1 , lowerCamelCase__ : Optional[Union[torch.Generator, List[torch.Generator]]] = None , lowerCamelCase__ : float = 0.0 , lowerCamelCase__ : int = 50 , lowerCamelCase__ : Optional[bool] = None , lowerCamelCase__ : Optional[str] = "pil" , lowerCamelCase__ : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(lowerCamelCase__ )
# 2. Preprocess image
_UpperCAmelCase : Dict = preprocess(lowerCamelCase__ )
# 3. set timesteps
self.scheduler.set_timesteps(lowerCamelCase__ , device=self.device )
_UpperCAmelCase , _UpperCAmelCase : Any = self.get_timesteps(lowerCamelCase__ , lowerCamelCase__ , self.device )
_UpperCAmelCase : List[Any] = timesteps[:1].repeat(lowerCamelCase__ )
# 4. Prepare latent variables
_UpperCAmelCase : Optional[int] = self.prepare_latents(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , self.unet.dtype , self.device , lowerCamelCase__ )
_UpperCAmelCase : Any = latents
# 5. Denoising loop
for t in self.progress_bar(lowerCamelCase__ ):
# 1. predict noise model_output
_UpperCAmelCase : Union[str, Any] = self.unet(lowerCamelCase__ , lowerCamelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase : int = self.scheduler.step(
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , eta=lowerCamelCase__ , use_clipped_model_output=lowerCamelCase__ , generator=lowerCamelCase__ , ).prev_sample
_UpperCAmelCase : Dict = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase : str = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=lowerCamelCase__ )
| 322 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def UpperCamelCase_( lowerCamelCase_ ) -> Any:
_lowercase : Any = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(lowerCamelCase_ , lowerCamelCase_ )
def UpperCamelCase_( lowerCamelCase_ ) -> str:
    """Create a bias-free ``nn.Linear`` that shares an embedding's weights.

    ``lowerCamelCase_`` is an ``nn.Embedding``; the returned layer reuses its
    weight tensor (weight tying for the LM head).
    """
    # Fix: the mangled original unpacked the shape from an undefined name and
    # passed the embedding object itself as the Linear layer sizes.
    vocab_size, emb_size = lowerCamelCase_.weight.shape
    # bias=False — the tied LM head is a pure projection through the embedding.
    lin_layer = nn.Linear(vocab_size, emb_size, bias=False)
    lin_layer.weight.data = lowerCamelCase_.weight.data
    return lin_layer
def UpperCamelCase_( lowerCamelCase_ ) -> Dict:
    """Convert a fairseq M2M-100 checkpoint into a HF MaMaaa model.

    ``lowerCamelCase_`` is the filesystem path to the fairseq ``model.pt``.
    Returns a ``MaMaaaForConditionalGeneration`` carrying the converted
    weights (not yet saved to disk).
    """
    # Fix: the mangled original assigned every intermediate to ``_lowercase``
    # but referenced the upstream names, so nothing was actually wired up.
    mam_aaa = torch.load(lowerCamelCase_, map_location='cpu')
    # Newer fairseq checkpoints keep the hyper-parameters under cfg/model.
    args = mam_aaa['args'] or mam_aaa['cfg']['model']
    state_dict = mam_aaa['model']
    remove_ignore_keys_(state_dict)
    vocab_size = state_dict['encoder.embed_tokens.weight'].shape[0]
    config = MaMaaaConfig(
        vocab_size=vocab_size,
        max_position_embeddings=1024,
        encoder_layers=args.encoder_layers,
        decoder_layers=args.decoder_layers,
        encoder_attention_heads=args.encoder_attention_heads,
        decoder_attention_heads=args.decoder_attention_heads,
        encoder_ffn_dim=args.encoder_ffn_embed_dim,
        decoder_ffn_dim=args.decoder_ffn_embed_dim,
        d_model=args.encoder_embed_dim,
        encoder_layerdrop=args.encoder_layerdrop,
        decoder_layerdrop=args.decoder_layerdrop,
        dropout=args.dropout,
        attention_dropout=args.attention_dropout,
        activation_dropout=args.activation_dropout,
        activation_function='relu',
    )
    # Encoder/decoder embeddings are tied through the shared table.
    state_dict['shared.weight'] = state_dict['decoder.embed_tokens.weight']
    model = MaMaaaForConditionalGeneration(config)
    # strict=False: tied/shared weights are re-created by the HF model itself.
    model.model.load_state_dict(state_dict, strict=False)
    model.lm_head = make_linear_from_emb(model.model.shared)
    return model
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("fairseq_path", type=str, help="path to a model.pt on local filesystem.")
    parser.add_argument("pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    args = parser.parse_args()
    # Fix: the original referenced undefined names (`parser`, `args`, `model`
    # were bound to SCREAMING_SNAKE_CASE) and contained a garbled attribute
    # access `args.fairseq_pathß`.
    model = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_path)
    model.save_pretrained(args.pytorch_dump_folder_path)
| 21 |
import gc
import random
import unittest
import torch
from diffusers import (
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
)
from diffusers.models.attention_processor import AttnAddedKVProcessor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import floats_tensor, load_numpy, require_torch_gpu, skip_mps, slow, torch_device
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
from . import IFPipelineTesterMixin
@skip_mps
class lowercase ( SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , unittest.TestCase ):
    """Fast tests for the base DeepFloyd IF text-to-image pipeline.

    Dummy components, save/load, batching and attention-slicing checks are
    all driven by the shared pipeline tester mixins in the base classes.
    """

    # Pipeline class under test.
    lowercase_ : Optional[Any] =IFPipeline
    # IF has no width/height/latents call args, so drop them from the generic set.
    lowercase_ : List[str] =TEXT_TO_IMAGE_PARAMS - {'''width''', '''height''', '''latents'''}
    lowercase_ : List[str] =TEXT_TO_IMAGE_BATCH_PARAMS
    lowercase_ : int =PipelineTesterMixin.required_optional_params - {'''latents'''}

    def A__ ( self):
        # Tiny dummy UNet/scheduler/text-encoder set; built by the IF tester mixin.
        return self._get_dummy_components()

    def A__ ( self ,A__ ,A__=0):
        # Build deterministic pipeline call kwargs for the given device and seed.
        if str(A__).startswith('''mps'''):
            # torch.Generator does not support the mps device — seed globally instead.
            lowercase = torch.manual_seed(A__)
        else:
            lowercase = torch.Generator(device=A__).manual_seed(A__)
        # NOTE(review): names below (`generator`, `inputs`) look mangled by the
        # obfuscation pass — upstream binds the generator/inputs variables here.
        lowercase = {
            '''prompt''': '''A painting of a squirrel eating a burger''',
            '''generator''': generator,
            '''num_inference_steps''': 2,
            '''output_type''': '''numpy''',
        }
        return inputs

    def A__ ( self):
        # Optional components must survive save_pretrained/from_pretrained.
        self._test_save_load_optional_components()

    @unittest.skipIf(torch_device != '''cuda''' ,reason='''float16 requires CUDA''')
    def A__ ( self):
        # Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_floataa(expected_max_diff=1E-1)

    def A__ ( self):
        # Attention slicing should match the unsliced forward pass closely.
        self._test_attention_slicing_forward_pass(expected_max_diff=1E-2)

    def A__ ( self):
        # Round-trip through a local save directory.
        self._test_save_load_local()

    def A__ ( self):
        # A batch of identical prompts must produce near-identical images.
        self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 ,)

    @unittest.skipIf(
        torch_device != '''cuda''' or not is_xformers_available() ,reason='''XFormers attention is only available with CUDA and `xformers` installed''' ,)
    def A__ ( self):
        # xFormers attention should reproduce the default attention output.
        self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3)
@slow
@require_torch_gpu
class lowercase ( unittest.TestCase ):
def A__ ( self):
    """Release GPU memory between the slow integration tests."""
    # clean up the VRAM after each test
    super().tearDown()
    gc.collect()
    torch.cuda.empty_cache()
def A__ ( self):
    """End-to-end smoke test of all IF pipeline variants on one GPU budget.

    Loads the stage-I and stage-II checkpoints once, pre-computes the prompt
    embeddings, then reuses the same components for text-to-image, img2img
    and inpainting via the ``_test_if*`` helpers.

    NOTE(review): variable names appear mangled — the repeated assignments to
    ``lowercase`` and the duplicated ``pipe_a.*`` calls presumably target two
    distinct pipeline objects (stage I and stage II) upstream; confirm before
    relying on this body.
    """
    # if
    lowercase = IFPipeline.from_pretrained('''DeepFloyd/IF-I-XL-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa)
    # Stage II reuses stage I's text encoder/tokenizer to avoid loading T5 twice.
    lowercase = IFSuperResolutionPipeline.from_pretrained(
        '''DeepFloyd/IF-II-L-v1.0''' ,variant='''fp16''' ,torch_dtype=torch.floataa ,text_encoder=A__ ,tokenizer=A__)
    # pre compute text embeddings and remove T5 to save memory
    pipe_a.text_encoder.to('''cuda''')
    lowercase , lowercase = pipe_a.encode_prompt('''anime turtle''' ,device='''cuda''')
    del pipe_a.tokenizer
    del pipe_a.text_encoder
    gc.collect()
    lowercase = None
    lowercase = None
    # Offload model weights to CPU between forward passes to bound VRAM use.
    pipe_a.enable_model_cpu_offload()
    pipe_a.enable_model_cpu_offload()
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    self._test_if(A__ ,A__ ,A__ ,A__)
    # Remove offload hooks before rebuilding pipelines from the same components.
    pipe_a.remove_all_hooks()
    pipe_a.remove_all_hooks()
    # img2img
    lowercase = IFImgaImgPipeline(**pipe_a.components)
    lowercase = IFImgaImgSuperResolutionPipeline(**pipe_a.components)
    pipe_a.enable_model_cpu_offload()
    pipe_a.enable_model_cpu_offload()
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    self._test_if_imgaimg(A__ ,A__ ,A__ ,A__)
    pipe_a.remove_all_hooks()
    pipe_a.remove_all_hooks()
    # inpainting
    lowercase = IFInpaintingPipeline(**pipe_a.components)
    lowercase = IFInpaintingSuperResolutionPipeline(**pipe_a.components)
    pipe_a.enable_model_cpu_offload()
    pipe_a.enable_model_cpu_offload()
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    pipe_a.unet.set_attn_processor(AttnAddedKVProcessor())
    self._test_if_inpainting(A__ ,A__ ,A__ ,A__)
def A__ ( self ,A__ ,A__ ,A__ ,A__):
    """Run text-to-image stage I then super-resolution stage II and compare
    each output against a stored reference image, with VRAM budgets.

    NOTE(review): parameter/variable names are mangled — upstream this takes
    the two pipelines plus prompt/negative embeddings; confirm the wiring.
    """
    # pipeline 1
    _start_torch_memory_measurement()
    lowercase = torch.Generator(device='''cpu''').manual_seed(0)
    lowercase = pipe_a(
        prompt_embeds=A__ ,negative_prompt_embeds=A__ ,num_inference_steps=2 ,generator=A__ ,output_type='''np''' ,)
    lowercase = output.images[0]
    # Stage I emits 64x64 RGB.
    assert image.shape == (6_4, 6_4, 3)
    # Peak VRAM for stage I must stay under 13 GB.
    lowercase = torch.cuda.max_memory_allocated()
    assert mem_bytes < 1_3 * 1_0**9
    lowercase = load_numpy(
        '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if.npy''')
    assert_mean_pixel_difference(A__ ,A__)
    # pipeline 2
    _start_torch_memory_measurement()
    lowercase = torch.Generator(device='''cpu''').manual_seed(0)
    lowercase = floats_tensor((1, 3, 6_4, 6_4) ,rng=random.Random(0)).to(A__)
    lowercase = pipe_a(
        prompt_embeds=A__ ,negative_prompt_embeds=A__ ,image=A__ ,generator=A__ ,num_inference_steps=2 ,output_type='''np''' ,)
    lowercase = output.images[0]
    # Stage II upscales to 256x256 RGB.
    assert image.shape == (2_5_6, 2_5_6, 3)
    # Super-resolution stage must stay under 4 GB peak VRAM.
    lowercase = torch.cuda.max_memory_allocated()
    assert mem_bytes < 4 * 1_0**9
    lowercase = load_numpy(
        '''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_superresolution_stage_II.npy''')
    assert_mean_pixel_difference(A__ ,A__)
def _test_if_imgaimg(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
    """Run the IF img2img stage-I/stage-II pipelines end to end.

    For each stage: checks output image shape, peak CUDA memory, and mean
    pixel difference against a stored reference image.
    NOTE(review): names reconstructed from the caller (`self._test_if_imgaimg`
    at L8821); `torch_device` assumed imported at module level — confirm.
    """
    # pipeline 1 (64x64 image -> 64x64)
    _start_torch_memory_measurement()
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
    generator = torch.Generator(device="cpu").manual_seed(0)
    output = pipe_1(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        image=image,
        num_inference_steps=2,
        generator=generator,
        output_type="np",
    )
    image = output.images[0]
    assert image.shape == (64, 64, 3)
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 10 * 10**9
    expected_image = load_numpy(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img.npy"
    )
    assert_mean_pixel_difference(image, expected_image)

    # pipeline 2 (super-resolution with the original 256x256 image)
    _start_torch_memory_measurement()
    generator = torch.Generator(device="cpu").manual_seed(0)
    original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
    output = pipe_2(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        image=image,
        original_image=original_image,
        generator=generator,
        num_inference_steps=2,
        output_type="np",
    )
    image = output.images[0]
    assert image.shape == (256, 256, 3)
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 4 * 10**9
    expected_image = load_numpy(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_img2img_superresolution_stage_II.npy"
    )
    assert_mean_pixel_difference(image, expected_image)
def _test_if_inpainting(self, pipe_1, pipe_2, prompt_embeds, negative_prompt_embeds):
    """Run the IF inpainting stage-I/stage-II pipelines end to end.

    For each stage: checks output image shape, peak CUDA memory, and mean
    pixel difference against a stored reference image.
    NOTE(review): names reconstructed from the caller (`self._test_if_inpainting`
    at L8831); `torch_device` assumed imported at module level — confirm.
    """
    # pipeline 1 (64x64 image + mask -> 64x64)
    _start_torch_memory_measurement()
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
    mask_image = floats_tensor((1, 3, 64, 64), rng=random.Random(1)).to(torch_device)
    generator = torch.Generator(device="cpu").manual_seed(0)
    output = pipe_1(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        image=image,
        mask_image=mask_image,
        num_inference_steps=2,
        generator=generator,
        output_type="np",
    )
    image = output.images[0]
    assert image.shape == (64, 64, 3)
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 10 * 10**9
    expected_image = load_numpy(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting.npy"
    )
    assert_mean_pixel_difference(image, expected_image)

    # pipeline 2 (super-resolution with 256x256 original and mask)
    _start_torch_memory_measurement()
    generator = torch.Generator(device="cpu").manual_seed(0)
    image = floats_tensor((1, 3, 64, 64), rng=random.Random(0)).to(torch_device)
    original_image = floats_tensor((1, 3, 256, 256), rng=random.Random(0)).to(torch_device)
    mask_image = floats_tensor((1, 3, 256, 256), rng=random.Random(1)).to(torch_device)
    output = pipe_2(
        prompt_embeds=prompt_embeds,
        negative_prompt_embeds=negative_prompt_embeds,
        image=image,
        mask_image=mask_image,
        original_image=original_image,
        generator=generator,
        num_inference_steps=2,
        output_type="np",
    )
    image = output.images[0]
    assert image.shape == (256, 256, 3)
    mem_bytes = torch.cuda.max_memory_allocated()
    assert mem_bytes < 4 * 10**9
    expected_image = load_numpy(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/if/test_if_inpainting_superresolution_stage_II.npy"
    )
    assert_mean_pixel_difference(image, expected_image)
def _start_torch_memory_measurement():
    """Reset the CUDA allocator statistics so the next peak-memory assert
    measures only the work that follows this call.

    Renamed from the scrambled `UpperCamelCase` to match its call sites
    (e.g. `_start_torch_memory_measurement()` inside the `_test_if*` helpers).
    """
    torch.cuda.empty_cache()
    torch.cuda.reset_max_memory_allocated()
    torch.cuda.reset_peak_memory_stats()
| 101 | 0 |
import argparse
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import download_controlnet_from_original_ckpt
def parse_bool(string):
    """Parse the literal strings 'True'/'False' into a bool.

    Used as an argparse ``type=`` callback so tri-state overrides stay
    ``None`` when the flag is not passed. Raises ValueError otherwise.
    """
    if string == "True":
        return True
    elif string == "False":
        return False
    else:
        raise ValueError(f"could not parse string as bool {string}")


if __name__ == "__main__":
    # CLI for converting an original ControlNet checkpoint to diffusers format.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--checkpoint_path", default=None, type=str, required=True, help="Path to the checkpoint to convert."
    )
    parser.add_argument(
        "--original_config_file",
        type=str,
        required=True,
        help="The YAML config file corresponding to the original architecture.",
    )
    parser.add_argument(
        "--num_in_channels",
        default=None,
        type=int,
        help="The number of input channels. If `None` number of input channels will be automatically inferred.",
    )
    parser.add_argument(
        "--image_size",
        default=512,
        type=int,
        help=(
            "The image size that the model was trained on. Use 512 for Stable Diffusion v1.X and Stable Siffusion v2"
            " Base. Use 768 for Stable Diffusion v2."
        ),
    )
    parser.add_argument(
        "--extract_ema",
        action="store_true",
        help=(
            "Only relevant for checkpoints that have both EMA and non-EMA weights. Whether to extract the EMA weights"
            " or not. Defaults to `False`. Add `--extract_ema` to extract the EMA weights. EMA weights usually yield"
            " higher quality images for inference. Non-EMA weights are usually better to continue fine-tuning."
        ),
    )
    parser.add_argument(
        "--upcast_attention",
        action="store_true",
        help=(
            "Whether the attention computation should always be upcasted. This is necessary when running stable"
            " diffusion 2.1."
        ),
    )
    parser.add_argument(
        "--from_safetensors",
        action="store_true",
        help="If `--checkpoint_path` is in `safetensors` format, load checkpoint with safetensors instead of PyTorch.",
    )
    parser.add_argument(
        "--to_safetensors",
        action="store_true",
        help="Whether to store pipeline in safetensors format or not.",
    )
    parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the output model.")
    parser.add_argument("--device", type=str, help="Device to use (e.g. cpu, cuda:0, cuda:1, etc.)")
    parser.add_argument(
        "--use_linear_projection", help="Override for use linear projection", required=False, type=parse_bool
    )
    parser.add_argument("--cross_attention_dim", help="Override for cross attention_dim", required=False, type=int)
    args = parser.parse_args()

    controlnet = download_controlnet_from_original_ckpt(
        checkpoint_path=args.checkpoint_path,
        original_config_file=args.original_config_file,
        image_size=args.image_size,
        extract_ema=args.extract_ema,
        num_in_channels=args.num_in_channels,
        upcast_attention=args.upcast_attention,
        from_safetensors=args.from_safetensors,
        device=args.device,
        use_linear_projection=args.use_linear_projection,
        cross_attention_dim=args.cross_attention_dim,
    )
    controlnet.save_pretrained(args.dump_path, safe_serialization=args.to_safetensors)
| 90 |
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
# Benchmark sizes: full-speed-test example count and the smaller variant
# used for the formatted-access benchmarks.
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000

# Timings are dumped next to this script under results/<script>.json.
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, "results", RESULTS_FILENAME.replace(".py", ".json"))
@get_duration
def read(dataset: datasets.Dataset, length: int):
    """Time sequential single-example access: dataset[0] .. dataset[length-1].

    Renamed from the scrambled `__UpperCamelCase` to match its use in the
    benchmark function lists. The read value is discarded; only access
    time matters (`@get_duration` returns the elapsed time).
    """
    for i in range(length):
        _ = dataset[i]
@get_duration
def read_batch(dataset: datasets.Dataset, length: int, batch_size: int):
    """Time sliced batch access over the whole dataset in `batch_size` steps.

    `length` is kept in the signature (unused) so the benchmark driver can
    pass the same kwargs dict shape as for `read`.
    """
    for i in range(0, len(dataset), batch_size):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset: datasets.Dataset, length: int, type: str):
    """Time single-example access under an output format (numpy/pandas/...).

    `type` deliberately shadows the builtin: the benchmark driver calls
    `func(dataset, **kwargs)` with a `'type'` key, so the parameter name
    must match.
    """
    with dataset.formatted_as(type=type):
        for i in range(length):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset: datasets.Dataset, length: int, batch_size: int, type: str):
    """Time sliced batch access under an output format.

    `type` shadows the builtin on purpose — it must match the kwargs key
    the benchmark driver passes.
    """
    with dataset.formatted_as(type=type):
        for i in range(0, length, batch_size):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    """Generate a synthetic dataset and benchmark the access patterns above,
    before and after shuffling; dump the timings as JSON.

    NOTE(review): the scrambled original discarded the timing results; the
    `times[...]` keys are reconstructed from the upstream datasets benchmark
    script — confirm against it.
    """
    times = {"num examples": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted, {"type": "pandas", "length": SMALL_TEST}),
        (read_formatted, {"type": "torch", "length": SMALL_TEST}),
        (read_formatted, {"type": "tensorflow", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    # A reduced set is re-run after shuffling (shuffled access is slower).
    functions_shuffled = [
        (read, {"length": SMALL_TEST}),
        (read, {"length": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 10}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 100}),
        (read_batch, {"length": SPEED_TEST_N_EXAMPLES, "batch_size": 1000}),
        (read_formatted, {"type": "numpy", "length": SMALL_TEST}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 10}),
        (read_formatted_batch, {"type": "numpy", "length": SMALL_TEST, "batch_size": 1000}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print("generating dataset")
        features = datasets.Features(
            {"list": datasets.Sequence(datasets.Value("float32")), "numbers": datasets.Value("float32")}
        )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, "dataset.arrow"),
            features,
            num_examples=SPEED_TEST_N_EXAMPLES,
            seq_shapes={"list": (100,)},
        )
        print("first set of iterations")
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print("shuffling dataset")
        dataset = dataset.shuffle()
        print("Second set of iterations (after shuffling")
        for func, kwargs in functions_shuffled:
            print("shuffled ", func.__name__, str(kwargs))
            times["shuffled " + func.__name__ + " " + " ".join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs
            )
    with open(RESULTS_FILE_PATH, "wb") as f:
        f.write(json.dumps(times).encode("utf-8"))


if __name__ == "__main__":  # useful to run the profiler
    benchmark_iterating()
| 90 | 1 |
'''simple docstring'''
import inspect
import unittest
from transformers import ConvNextConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextBackbone, ConvNextForImageClassification, ConvNextModel
from transformers.models.convnext.modeling_convnext import CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
# NOTE(review): this class is machine-obfuscated (flattened indentation, all
# locals named `__a`, class name scrambled). It is referenced below as
# `ConvNextModelTester` — restore names/indentation before running. It builds
# ConvNext configs/inputs and checks model, classifier, and backbone outputs.
class __UpperCamelCase :
# Stores the hyperparameters used to build small ConvNext test configs.
def __init__( self , __a , __a=13 , __a=32 , __a=3 , __a=4 , __a=[10, 20, 30, 40] , __a=[2, 2, 3, 2] , __a=True , __a=True , __a=37 , __a="gelu" , __a=10 , __a=0.02 , __a=["stage2", "stage3", "stage4"] , __a=[2, 3, 4] , __a=None , ):
'''simple docstring'''
__a : Union[str, Any] = parent
__a : Optional[int] = batch_size
__a : List[Any] = image_size
__a : Dict = num_channels
__a : Optional[int] = num_stages
__a : List[str] = hidden_sizes
__a : Optional[Any] = depths
__a : Any = is_training
__a : Tuple = use_labels
__a : str = intermediate_size
__a : Any = hidden_act
__a : Dict = num_labels
__a : List[str] = initializer_range
__a : Optional[Any] = out_features
__a : List[Any] = out_indices
__a : Any = scope
# Returns (config, pixel_values, labels) with random pixel values.
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : List[str] = None
if self.use_labels:
__a : Optional[int] = ids_tensor([self.batch_size] , self.num_labels )
__a : int = self.get_config()
return config, pixel_values, labels
# Builds a ConvNextConfig from the stored hyperparameters.
def __UpperCAmelCase ( self ):
'''simple docstring'''
return ConvNextConfig(
num_channels=self.num_channels , hidden_sizes=self.hidden_sizes , depths=self.depths , num_stages=self.num_stages , hidden_act=self.hidden_act , is_decoder=__a , initializer_range=self.initializer_range , out_features=self.out_features , out_indices=self.out_indices , num_labels=self.num_labels , )
# Checks the base model's last hidden state shape (B, C, H//32, W//32).
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : List[Any] = ConvNextModel(config=__a )
model.to(__a )
model.eval()
__a : List[str] = model(__a )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
# Checks the image-classification head returns (B, num_labels) logits.
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[Any] = ConvNextForImageClassification(__a )
model.to(__a )
model.eval()
__a : Union[str, Any] = model(__a , labels=__a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
# Checks backbone feature maps/channels, with and without out_features.
def __UpperCAmelCase ( self , __a , __a , __a ):
'''simple docstring'''
__a : Optional[int] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__a : List[Any] = model(__a )
# verify hidden states
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[1:] )
# verify backbone works with out_features=None
__a : List[str] = None
__a : List[Any] = ConvNextBackbone(config=__a )
model.to(__a )
model.eval()
__a : Optional[Any] = model(__a )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
# Packs prepared inputs into the dict shape the common tests expect.
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = self.prepare_config_and_inputs()
__a : List[str] = config_and_inputs
__a : int = {'pixel_values': pixel_values}
return config, inputs_dict
# NOTE(review): machine-obfuscated test-suite class (flattened indentation,
# scrambled names `__UpperCamelCase`/`__a`/`__A`). It is the ConvNext
# ModelTesterMixin/PipelineTesterMixin suite; base classes and names need
# restoring before this is runnable.
@require_torch
class __UpperCamelCase ( __A , __A , unittest.TestCase ):
# Model classes exercised by the common tests (empty when torch is absent).
A_ = (
(
ConvNextModel,
ConvNextForImageClassification,
ConvNextBackbone,
)
if is_torch_available()
else ()
)
# Pipeline task -> model class mapping for the pipeline tests.
A_ = (
{"feature-extraction": ConvNextModel, "image-classification": ConvNextForImageClassification}
if is_torch_available()
else {}
)
# Common-test feature flags (attention outputs etc. disabled for ConvNext).
A_ = True
A_ = False
A_ = False
A_ = False
A_ = False
# setUp: build the model tester and the config tester.
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Union[str, Any] = ConvNextModelTester(self )
__a : Optional[int] = ConfigTester(self , config_class=__a , has_text_modality=__a , hidden_size=37 )
# Runs the standard config round-trip checks.
def __UpperCAmelCase ( self ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __UpperCAmelCase ( self ):
'''simple docstring'''
return
@unittest.skip(reason='ConvNext does not use inputs_embeds' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not support input and output embeddings' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
@unittest.skip(reason='ConvNext does not use feedforward chunking' )
def __UpperCAmelCase ( self ):
'''simple docstring'''
pass
# Checks the forward signature starts with `pixel_values`.
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[str] = model_class(__a )
__a : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : str = [*signature.parameters.keys()]
__a : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__a )
# Checks hidden-state outputs: stage count and first feature-map resolution.
def __UpperCAmelCase ( self ):
'''simple docstring'''
def check_hidden_states_output(__a , __a , __a ):
__a : Tuple = model_class(__a )
model.to(__a )
model.eval()
with torch.no_grad():
__a : List[str] = model(**self._prepare_for_class(__a , __a ) )
__a : Union[str, Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__a : int = self.model_tester.num_stages
self.assertEqual(len(__a ) , expected_num_stages + 1 )
# ConvNext's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__a : List[Any] = True
check_hidden_states_output(__a , __a , __a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Tuple = True
check_hidden_states_output(__a , __a , __a )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__a )
# Smoke-tests loading the first pretrained checkpoint from the hub.
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
for model_name in CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : List[Any] = ConvNextModel.from_pretrained(__a )
self.assertIsNotNone(__a )
def prepare_img():
    """Load the standard COCO cats fixture image used by the integration test.

    Renamed from the scrambled `lowerCamelCase` to match its call site
    (`prepare_img()` in the integration test below).
    """
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
# NOTE(review): machine-obfuscated integration test (flattened indentation,
# locals named `__a`). It loads convnext-tiny-224, runs one forward pass on
# the COCO fixture image, and checks the logits' shape and first values.
@require_torch
@require_vision
class __UpperCamelCase ( unittest.TestCase ):
# Lazily-built image processor; None when vision deps are missing.
@cached_property
def __UpperCAmelCase ( self ):
'''simple docstring'''
return AutoImageProcessor.from_pretrained('facebook/convnext-tiny-224' ) if is_vision_available() else None
# End-to-end classification head check against hard-coded logits.
@slow
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : int = ConvNextForImageClassification.from_pretrained('facebook/convnext-tiny-224' ).to(__a )
__a : List[str] = self.default_image_processor
__a : Optional[int] = prepare_img()
__a : List[Any] = image_processor(images=__a , return_tensors='pt' ).to(__a )
# forward pass
with torch.no_grad():
__a : Optional[Any] = model(**__a )
# verify the logits
__a : Any = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , __a )
# reference values for the first three ImageNet logits of this checkpoint
__a : str = torch.tensor([-0.0260, -0.4739, 0.1911] ).to(__a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __a , atol=1E-4 ) )
@require_torch
class __UpperCamelCase ( unittest.TestCase , __A ):
A_ = (ConvNextBackbone,) if is_torch_available() else ()
A_ = ConvNextConfig
A_ = False
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : str = ConvNextModelTester(self )
| 27 | import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
# The three U-Net layouts match the released OpenAI consistency-model
# checkpoints; names restored from the selection logic in __main__
# ("test" -> TEST, "imagenet64" -> IMAGENET_64, "256"+bedroom/cat -> LSUN_256).

# Small 32x32 config used by the test checkpoints.
TEST_UNET_CONFIG = {
    "sample_size": 32,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": 1000,
    "block_out_channels": [32, 64],
    "attention_head_dim": 8,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Class-conditional ImageNet 64x64 config.
IMAGENET_64_UNET_CONFIG = {
    "sample_size": 64,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 3,
    "num_class_embeds": 1000,
    "block_out_channels": [192, 192 * 2, 192 * 3, 192 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "scale_shift",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Unconditional LSUN bedroom/cat 256x256 config.
LSUN_256_UNET_CONFIG = {
    "sample_size": 256,
    "in_channels": 3,
    "out_channels": 3,
    "layers_per_block": 2,
    "num_class_embeds": None,
    "block_out_channels": [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
    "attention_head_dim": 64,
    "down_block_types": [
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "ResnetDownsampleBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
        "AttnDownBlock2D",
    ],
    "up_block_types": [
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "AttnUpBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
        "ResnetUpsampleBlock2D",
    ],
    "resnet_time_scale_shift": "default",
    "upsample_type": "resnet",
    "downsample_type": "resnet",
}

# Scheduler configs: consistency distillation (cd) and consistency
# training (ct) on the two dataset families; names restored from __main__.
CD_SCHEDULER_CONFIG = {
    "num_train_timesteps": 40,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_IMAGENET_64_SCHEDULER_CONFIG = {
    "num_train_timesteps": 201,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}

CT_LSUN_256_SCHEDULER_CONFIG = {
    "num_train_timesteps": 151,
    "sigma_min": 0.002,
    "sigma_max": 80.0,
}
def strabool(value):
    """argparse-friendly bool parser: pass real bools through, map common
    yes/no strings, and reject anything else.

    Fixes the scrambled `isinstance(v, v)` (always True, so string inputs
    were returned unparsed) — the check must be against `bool`.
    """
    if isinstance(value, bool):
        return value
    if value.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif value.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise argparse.ArgumentTypeError("boolean value expected")
def convert_resnet(checkpoint, new_checkpoint, old_prefix, new_prefix, has_skip=False):
    """Copy one ResNet block's weights from an OpenAI consistency-model
    checkpoint into diffusers naming.

    Args:
        checkpoint: source state dict (OpenAI `in_layers`/`out_layers` layout).
        new_checkpoint: destination state dict (diffusers layout); mutated in place.
        old_prefix: key prefix of this block in `checkpoint`.
        new_prefix: key prefix of this block in `new_checkpoint`.
        has_skip: whether the block has a skip connection (conv shortcut).

    Returns:
        `new_checkpoint`, for chaining.

    NOTE(review): the scrambled original lost the destination keys; they are
    restored from the upstream diffusers conversion script.
    """
    new_checkpoint[f"{new_prefix}.norm1.weight"] = checkpoint[f"{old_prefix}.in_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm1.bias"] = checkpoint[f"{old_prefix}.in_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv1.weight"] = checkpoint[f"{old_prefix}.in_layers.2.weight"]
    new_checkpoint[f"{new_prefix}.conv1.bias"] = checkpoint[f"{old_prefix}.in_layers.2.bias"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.weight"] = checkpoint[f"{old_prefix}.emb_layers.1.weight"]
    new_checkpoint[f"{new_prefix}.time_emb_proj.bias"] = checkpoint[f"{old_prefix}.emb_layers.1.bias"]
    new_checkpoint[f"{new_prefix}.norm2.weight"] = checkpoint[f"{old_prefix}.out_layers.0.weight"]
    new_checkpoint[f"{new_prefix}.norm2.bias"] = checkpoint[f"{old_prefix}.out_layers.0.bias"]
    new_checkpoint[f"{new_prefix}.conv2.weight"] = checkpoint[f"{old_prefix}.out_layers.3.weight"]
    new_checkpoint[f"{new_prefix}.conv2.bias"] = checkpoint[f"{old_prefix}.out_layers.3.bias"]
    if has_skip:
        new_checkpoint[f"{new_prefix}.conv_shortcut.weight"] = checkpoint[f"{old_prefix}.skip_connection.weight"]
        new_checkpoint[f"{new_prefix}.conv_shortcut.bias"] = checkpoint[f"{old_prefix}.skip_connection.bias"]
    return new_checkpoint
def convert_attention(checkpoint, new_checkpoint, old_prefix, new_prefix, attention_dim=None):
    """Copy one attention block's weights from an OpenAI consistency-model
    checkpoint into diffusers naming.

    Splits the fused `qkv` projection into separate to_q/to_k/to_v tensors
    and drops the trailing 1x1-conv dims (`squeeze(-1).squeeze(-1)`) so the
    weights fit diffusers' linear-attention layout.

    Args:
        checkpoint: source state dict.
        new_checkpoint: destination state dict; mutated in place.
        old_prefix / new_prefix: key prefixes in the two layouts.
        attention_dim: unused; kept for call-site compatibility.

    Returns:
        `new_checkpoint`, for chaining.

    NOTE(review): destination keys restored from the upstream diffusers
    conversion script (the scramble collapsed them into `A_`).
    """
    weight_q, weight_k, weight_v = checkpoint[f"{old_prefix}.qkv.weight"].chunk(3, dim=0)
    bias_q, bias_k, bias_v = checkpoint[f"{old_prefix}.qkv.bias"].chunk(3, dim=0)

    new_checkpoint[f"{new_prefix}.group_norm.weight"] = checkpoint[f"{old_prefix}.norm.weight"]
    new_checkpoint[f"{new_prefix}.group_norm.bias"] = checkpoint[f"{old_prefix}.norm.bias"]

    new_checkpoint[f"{new_prefix}.to_q.weight"] = weight_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_q.bias"] = bias_q.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.weight"] = weight_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_k.bias"] = bias_k.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.weight"] = weight_v.squeeze(-1).squeeze(-1)
    new_checkpoint[f"{new_prefix}.to_v.bias"] = bias_v.squeeze(-1).squeeze(-1)

    new_checkpoint[f"{new_prefix}.to_out.0.weight"] = (
        checkpoint[f"{old_prefix}.proj_out.weight"].squeeze(-1).squeeze(-1)
    )
    new_checkpoint[f"{new_prefix}.to_out.0.bias"] = checkpoint[f"{old_prefix}.proj_out.bias"].squeeze(-1).squeeze(-1)
    return new_checkpoint
# NOTE(review): machine-obfuscated converter (flattened indentation; all
# destination assignments collapsed into `A_`, so the diffusers key names are
# lost and `new_checkpoint` is never actually built). Called below as
# `con_pt_to_diffuser(unet_path, unet_config)`; restore names from the
# upstream diffusers conversion script before running. The walk order —
# time embedding, label embedding, conv_in, down blocks, mid block,
# up blocks, conv_out — is intact and documented inline.
def UpperCamelCase ( __lowercase : str ,__lowercase : Union[str, Any] ):
'''simple docstring'''
# Load the raw OpenAI checkpoint on CPU and start a fresh diffusers dict.
A_ : Union[str, Any] = torch.load(__lowercase ,map_location='cpu' )
A_ : Dict = {}
# Time embedding MLP (time_embed.{0,2} -> time_embedding.linear_{1,2}).
A_ : Dict = checkpoint['time_embed.0.weight']
A_ : Any = checkpoint['time_embed.0.bias']
A_ : Union[str, Any] = checkpoint['time_embed.2.weight']
A_ : Tuple = checkpoint['time_embed.2.bias']
# Class-label embedding only exists for class-conditional configs.
if unet_config["num_class_embeds"] is not None:
A_ : Dict = checkpoint['label_emb.weight']
# Stem convolution (input_blocks.0.0 -> conv_in).
A_ : Tuple = checkpoint['input_blocks.0.0.weight']
A_ : Tuple = checkpoint['input_blocks.0.0.bias']
A_ : str = unet_config['down_block_types']
A_ : List[str] = unet_config['layers_per_block']
A_ : Any = unet_config['attention_head_dim']
A_ : int = unet_config['block_out_channels']
# current_layer indexes the flat OpenAI input_blocks list; the first block
# after the stem is index 1.
A_ : Union[str, Any] = 1
A_ : List[str] = channels_list[0]
# Down blocks: resnets (plus attentions for Attn blocks) and a downsampler
# between stages. The first resnet of a stage gets a skip conv when the
# channel count changes.
for i, layer_type in enumerate(__lowercase ):
A_ : List[Any] = channels_list[i]
A_ : int = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(__lowercase ):
A_ : Any = f'''down_blocks.{i}.resnets.{j}'''
A_ : str = f'''input_blocks.{current_layer}.0'''
A_ : List[Any] = True if j == 0 and downsample_block_has_skip else False
A_ : Any = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(__lowercase ):
A_ : Dict = f'''down_blocks.{i}.resnets.{j}'''
A_ : Optional[int] = f'''input_blocks.{current_layer}.0'''
A_ : str = True if j == 0 and downsample_block_has_skip else False
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
A_ : Optional[Any] = f'''down_blocks.{i}.attentions.{j}'''
A_ : Union[str, Any] = f'''input_blocks.{current_layer}.1'''
A_ : Tuple = convert_attention(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
# Every stage except the last is followed by a downsampling resnet.
if i != len(__lowercase ) - 1:
A_ : List[Any] = f'''down_blocks.{i}.downsamplers.0'''
A_ : Dict = f'''input_blocks.{current_layer}.0'''
A_ : Tuple = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
A_ : Tuple = current_channels
# hardcoded the mid-block for now
A_ : int = 'mid_block.resnets.0'
A_ : Dict = 'middle_block.0'
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Tuple = 'mid_block.attentions.0'
A_ : Any = 'middle_block.1'
A_ : Tuple = convert_attention(__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Union[str, Any] = 'mid_block.resnets.1'
A_ : Any = 'middle_block.2'
A_ : int = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
A_ : Tuple = 0
A_ : Optional[Any] = unet_config['up_block_types']
# Up blocks mirror the down blocks; each stage has layers_per_block + 1
# resnets (the extra one consumes the skip connection) and an upsampler
# between stages.
for i, layer_type in enumerate(__lowercase ):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1 ):
A_ : Dict = f'''up_blocks.{i}.resnets.{j}'''
A_ : Dict = f'''output_blocks.{current_layer}.0'''
A_ : List[str] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
current_layer += 1
if i != len(__lowercase ) - 1:
A_ : Union[str, Any] = f'''up_blocks.{i}.upsamplers.0'''
A_ : List[str] = f'''output_blocks.{current_layer-1}.1'''
A_ : Optional[Any] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1 ):
A_ : int = f'''up_blocks.{i}.resnets.{j}'''
A_ : Union[str, Any] = f'''output_blocks.{current_layer}.0'''
A_ : Optional[int] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase ,has_skip=__lowercase )
A_ : Optional[Any] = f'''up_blocks.{i}.attentions.{j}'''
A_ : Any = f'''output_blocks.{current_layer}.1'''
A_ : List[str] = convert_attention(
__lowercase ,__lowercase ,__lowercase ,__lowercase ,__lowercase )
current_layer += 1
# In Attn up-stages the upsampler sits at sub-index .2 (after attention).
if i != len(__lowercase ) - 1:
A_ : Dict = f'''up_blocks.{i}.upsamplers.0'''
A_ : Any = f'''output_blocks.{current_layer-1}.2'''
A_ : List[str] = convert_resnet(__lowercase ,__lowercase ,__lowercase ,__lowercase )
# Output head (out.{0,2} -> conv_norm_out / conv_out).
A_ : Any = checkpoint['out.0.weight']
A_ : Dict = checkpoint['out.0.bias']
A_ : int = checkpoint['out.2.weight']
A_ : List[str] = checkpoint['out.2.bias']
return new_checkpoint
if __name__ == "__main__":
    # CLI for converting an OpenAI consistency-model unet.pt into a
    # diffusers ConsistencyModelPipeline. Variable names restored from the
    # scrambled `_UpperCAmelCase` reassignments.
    parser = argparse.ArgumentParser()
    parser.add_argument("--unet_path", default=None, type=str, required=True, help="Path to the unet.pt to convert.")
    parser.add_argument(
        "--dump_path", default=None, type=str, required=True, help="Path to output the converted UNet model."
    )
    parser.add_argument("--class_cond", default=True, type=str, help="Whether the model is class-conditional.")
    args = parser.parse_args()

    # --class_cond arrives as a string (or the default bool); normalize it.
    args.class_cond = strabool(args.class_cond)

    ckpt_name = os.path.basename(args.unet_path)
    print(f"Checkpoint: {ckpt_name}")

    # Pick the U-Net config matching the known released checkpoints.
    if "imagenet64" in ckpt_name:
        unet_config = IMAGENET_64_UNET_CONFIG
    elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        unet_config = LSUN_256_UNET_CONFIG
    elif "test" in ckpt_name:
        unet_config = TEST_UNET_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    if not args.class_cond:
        # Unconditional model: drop the class-embedding table.
        unet_config["num_class_embeds"] = None

    converted_unet_ckpt = con_pt_to_diffuser(args.unet_path, unet_config)
    image_unet = UNetaDModel(**unet_config)
    image_unet.load_state_dict(converted_unet_ckpt)

    # Pick the scheduler config: consistency distillation (cd) vs training (ct).
    if "cd" in ckpt_name or "test" in ckpt_name:
        scheduler_config = CD_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
        scheduler_config = CT_IMAGENET_64_SCHEDULER_CONFIG
    elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
        scheduler_config = CT_LSUN_256_SCHEDULER_CONFIG
    else:
        raise ValueError(f"Checkpoint type {ckpt_name} is not currently supported.")

    cm_scheduler = CMStochasticIterativeScheduler(**scheduler_config)
    consistency_model = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
    consistency_model.save_pretrained(args.dump_path)
| 140 | 0 |
import json
import os
import unittest
from transformers.models.ctrl.tokenization_ctrl import VOCAB_FILES_NAMES, CTRLTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
# NOTE(review): machine-obfuscated tokenizer test (flattened indentation,
# scrambled base-class/arg names `_snake_case`/`_lowerCamelCase`). It is the
# CTRL BPE tokenizer suite built on TokenizerTesterMixin; names/indentation
# need restoring before this is runnable.
class _snake_case ( _snake_case , unittest.TestCase ):
SCREAMING_SNAKE_CASE__ = CTRLTokenizer
SCREAMING_SNAKE_CASE__ = False
SCREAMING_SNAKE_CASE__ = False
# setUp: write a tiny BPE vocab + merges file into the tmp dir.
def SCREAMING_SNAKE_CASE__ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
a :int = ['''adapt''', '''re@@''', '''a@@''', '''apt''', '''c@@''', '''t''', '''<unk>''']
a :List[Any] = dict(zip(_lowerCamelCase , range(len(_lowerCamelCase ) ) ) )
a :Tuple = ['''#version: 0.2''', '''a p''', '''ap t</w>''', '''r e''', '''a d''', '''ad apt</w>''', '''''']
a :str = {'''unk_token''': '''<unk>'''}
a :Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
a :Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(_lowerCamelCase ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(_lowerCamelCase ) )
# Builds a tokenizer from the fixture files, merging in the special tokens.
def SCREAMING_SNAKE_CASE__ ( self , **_lowerCamelCase ):
kwargs.update(self.special_tokens_map )
return CTRLTokenizer.from_pretrained(self.tmpdirname , **_lowerCamelCase )
# Returns an (input, expected-output) text pair for round-trip tests.
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Union[str, Any] = '''adapt react readapt apt'''
a :List[Any] = '''adapt react readapt apt'''
return input_text, output_text
# Checks full tokenization: BPE splits and id conversion (unk -> id 6).
def SCREAMING_SNAKE_CASE__ ( self ):
a :Dict = CTRLTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
a :Dict = '''adapt react readapt apt'''
a :Union[str, Any] = '''adapt re@@ a@@ c@@ t re@@ adapt apt'''.split()
a :List[Any] = tokenizer.tokenize(_lowerCamelCase )
self.assertListEqual(_lowerCamelCase , _lowerCamelCase )
a :Optional[Any] = tokens + [tokenizer.unk_token]
a :List[Any] = [0, 1, 2, 4, 5, 1, 0, 3, 6]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_lowerCamelCase ) , _lowerCamelCase )
| 350 |
def harmonic_series(n_term: str) -> list:
    """Generate the Harmonic series 1, 1/2, 1/3, ..., 1/n as strings.

    :param n_term: number of terms, as a string (e.g. from ``input()``);
        the empty string yields an empty list.
    :return: list of term strings, e.g. ``["1", "1/2", "1/3"]``.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is written "1"; every following term is "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series


if __name__ == "__main__":
    nth_term = input("Enter the last number (nth term) of the Harmonic Series")
    print("Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n")
    print(harmonic_series(nth_term))
# ---- non-code dataset artifact (chunk separator) ----
"""Koch snowflake: iterative edge subdivision of an initial triangle, plotted with matplotlib."""
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake (equilateral, closed by repeating the first vertex)
VECTOR_1 = numpy.array([0, 0])
VECTOR_2 = numpy.array([0.5, 0.8660254])  # 0.8660254 ~= sqrt(3)/2
VECTOR_3 = numpy.array([1, 0])

INITIAL_VECTORS = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
def iterate(initial_vectors: list[numpy.ndarray], steps: int) -> list[numpy.ndarray]:
    """Apply `iteration_step` to the polyline `steps` times and return the refined polyline."""
    vectors = initial_vectors
    for _ in range(steps):
        vectors = iteration_step(vectors)
    return vectors
def iteration_step(vectors: list[numpy.ndarray]) -> list[numpy.ndarray]:
    """Replace every segment by four segments forming the Koch "bump".

    Each edge (start, end) becomes: start, start+d/3, the tip of an outward
    equilateral triangle (d/3 rotated by 60 degrees), start+2d/3 — and the final
    vertex of the polyline is appended once at the end.
    """
    new_vectors = []
    for i, start_vector in enumerate(vectors[:-1]):
        end_vector = vectors[i + 1]
        new_vectors.append(start_vector)
        difference_vector = end_vector - start_vector
        new_vectors.append(start_vector + difference_vector / 3)
        # Tip of the bump: one third of the edge, rotated 60 degrees outward.
        new_vectors.append(
            start_vector + difference_vector / 3 + rotate(difference_vector / 3, 60)
        )
        new_vectors.append(start_vector + difference_vector * 2 / 3)
    new_vectors.append(vectors[-1])
    return new_vectors
def rotate(vector: numpy.ndarray, angle_in_degrees: float) -> numpy.ndarray:
    """Rotate a 2-D vector counterclockwise by `angle_in_degrees` using a rotation matrix."""
    theta = numpy.radians(angle_in_degrees)
    c, s = numpy.cos(theta), numpy.sin(theta)
    rotation_matrix = numpy.array(((c, -s), (s, c)))
    return numpy.dot(rotation_matrix, vector)
def plot(vectors: list[numpy.ndarray]) -> None:
    """Draw the polyline described by `vectors` with equal axis scaling (blocking call)."""
    axes = plt.gca()
    axes.set_aspect("equal")

    # matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
    # y-coordinates as inputs, which are constructed from the vector-list using
    # zip()
    x_coordinates, y_coordinates = zip(*vectors)
    plt.plot(x_coordinates, y_coordinates)
    plt.show()
if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # Build the order-5 Koch snowflake from the initial triangle and draw it.
    processed_vectors = iterate(INITIAL_VECTORS, 5)
    plot(processed_vectors)
# ---- non-code dataset artifact (chunk separator) ----
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
SCREAMING_SNAKE_CASE_: Optional[int] =logging.get_logger(__name__)
@add_end_docstrings(
    PIPELINE_INIT_ARGS,
    r"""
        top_k (`int`, defaults to 5):
            The number of predictions to return.
        targets (`str` or `List[str]`, *optional*):
            When passed, the model will limit the scores to the passed targets instead of looking up in the whole
            vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
            token will be used (with a warning, and that might be slower).
    """,
)
class __A(Pipeline):
    """Masked-language-modeling pipeline: fills the tokenizer's mask token in the input text(s)."""

    def get_masked_index(self, input_ids: GenericTensor):
        """Return the positions of the mask token inside `input_ids` (framework-native index tensor)."""
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor):
        """Raise a `PipelineException` when `input_ids` contains no mask token."""
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"""No mask_token ({self.tokenizer.mask_token}) found on the input""",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        """Validate that every encoded sample in `model_inputs` contains a mask token."""
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        """Tokenize `inputs` (defaulting tensors to the pipeline framework) and check a mask is present."""
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        """Run the model; carry the input ids alongside the logits for postprocessing."""
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        """Turn logits into (nested) lists of {score, token, token_str, sequence} dicts, best-first."""
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]
            outputs = outputs.numpy()
            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)
            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()
                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {
                    "score": v,
                    "token": p,
                    "token_str": self.tokenizer.decode([p]),
                    "sequence": sequence,
                }
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        """Map `targets` (str or list of str) to vocab ids, tokenizing out-of-vocab targets with a warning."""
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"""The specified target token `{target}` does not exist in the model vocabulary. """
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"""The specified target token `{target}` does not exist in the model vocabulary. """
                    f"""Replacing with `{self.tokenizer.convert_ids_to_tokens(id_ )}`."""
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        """Route `targets`/`top_k` to postprocess; reject tokenizers without a mask token."""
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        """Fill the masked token(s); a single-input call returns a flat list instead of a list of lists."""
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
# ---- non-code dataset artifact (chunk separator) ----
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__UpperCAmelCase = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class lowerCamelCase (nn.Module ):
    """Encode an image into `args.num_image_embeds` pooled ResNet-152 feature vectors (2048-d each)."""

    def __init__(self, args):
        super().__init__()
        # Keep the convolutional trunk only: drop ResNet's final avgpool + fc layers.
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        # Pool the 7x7 feature map down to an (h, w) grid with h*w == num_image_embeds.
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, input_modal):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(input_modal))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class lowerCamelCase (Dataset ):
    """Dataset over a JSON-lines file with `text`, `img` (relative path) and `label` (list of genres) fields."""

    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        # Image paths in the jsonl are relative to the file's directory.
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(
            self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True)
        )
        # Split off the special start/end tokens; truncate the middle to max_seq_length.
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        # Multi-hot label vector over the known genre list.
        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        """Count how often each label string occurs across the dataset."""
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def lowercase__(batch):
    """Collate variable-length samples into padded batch tensors.

    :param batch: list of dicts as produced by the dataset's ``__getitem__``.
    :return: (text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor)
        where text is zero-padded to the batch max length and mask marks real tokens with 1.
    """
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowercase__():
    """Return the fixed list of 23 MM-IMDB genre labels, in canonical order."""
    genres = (
        "Crime Drama Thriller Action Comedy Romance Documentary Short Mystery "
        "History Family Adventure Fantasy Sci-Fi Western Horror Sport War Music "
        "Musical Animation Biography Film-Noir"
    )
    return genres.split()
def lowercase__():
    """Build the image preprocessing pipeline: resize, center-crop to 224, tensorize, normalize."""
    normalize = transforms.Normalize(
        mean=[0.46777044, 0.44531429, 0.40661017],
        std=[0.12221994, 0.12145835, 0.14380469],
    )
    return transforms.Compose(
        [transforms.Resize(256), transforms.CenterCrop(224), transforms.ToTensor(), normalize]
    )
# ---- non-code dataset artifact (chunk separator) ----
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
__UpperCAmelCase = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase (ModelMixin , ConfigMixin ):
    """Container for the (optionally learned) classifier-free-guidance "negative prompt" embeddings."""

    @register_to_config
    def __init__(self, learnable, hidden_size=None, length=None):
        super().__init__()

        self.learnable = learnable

        if self.learnable:
            # A learnable embedding requires an explicit shape.
            assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
            assert length is not None, "learnable=True requires `length` to be set"
            embeddings = torch.zeros(length, hidden_size)
        else:
            embeddings = None

        self.embeddings = torch.nn.Parameter(embeddings)
class lowerCamelCase (DiffusionPipeline ):
    """Pipeline for text-to-image generation with VQ Diffusion (discrete latents + CLIP text encoder)."""

    vqvae: VQModel
    text_encoder: CLIPTextModel
    tokenizer: CLIPTokenizer
    transformer: TransformeraDModel
    # Quoted: the embeddings class is declared elsewhere in this module.
    learned_classifier_free_sampling_embeddings: "LearnedClassifierFreeSamplingEmbeddings"
    scheduler: VQDiffusionScheduler

    def __init__(
        self,
        vqvae,
        text_encoder,
        tokenizer,
        transformer,
        scheduler,
        learned_classifier_free_sampling_embeddings,
    ):
        super().__init__()
        self.register_modules(
            vqvae=vqvae,
            transformer=transformer,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            scheduler=scheduler,
            learned_classifier_free_sampling_embeddings=learned_classifier_free_sampling_embeddings,
        )

    def _encode_prompt(self, prompt, num_images_per_prompt, do_classifier_free_guidance):
        """Encode `prompt` into normalized CLIP embeddings, optionally prepending negative embeddings."""
        batch_size = len(prompt) if isinstance(prompt, list) else 1

        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids

        if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
            removed_text = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
            text_input_ids = text_input_ids[:, : self.tokenizer.model_max_length]
        prompt_embeds = self.text_encoder(text_input_ids.to(self.device))[0]

        # NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
        # While CLIP does normalize the pooled output of the text transformer when combining
        # the image and text embeddings, CLIP does not directly normalize the last hidden state.
        #
        # CLIP normalizing the pooled output.
        # https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
        prompt_embeds = prompt_embeds / prompt_embeds.norm(dim=-1, keepdim=True)

        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            if self.learned_classifier_free_sampling_embeddings.learnable:
                negative_prompt_embeds = self.learned_classifier_free_sampling_embeddings.embeddings
                negative_prompt_embeds = negative_prompt_embeds.unsqueeze(0).repeat(batch_size, 1, 1)
            else:
                uncond_tokens = [""] * batch_size
                max_length = text_input_ids.shape[-1]
                uncond_input = self.tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_length,
                    truncation=True,
                    return_tensors="pt",
                )
                negative_prompt_embeds = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
                # See comment for normalizing text embeddings
                negative_prompt_embeds = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1, keepdim=True)

                # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                seq_len = negative_prompt_embeds.shape[1]
                negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        return prompt_embeds

    @torch.no_grad()
    def __call__(
        self,
        prompt,
        num_inference_steps=1_0_0,
        guidance_scale=5.0,
        truncation_rate=1.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
        callback=None,
        callback_steps=1,
    ) -> Union[ImagePipelineOutput, Tuple]:
        """Generate image(s) from `prompt`; returns an `ImagePipelineOutput` (or a tuple if not `return_dict`)."""
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        batch_size = batch_size * num_images_per_prompt

        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds = self._encode_prompt(prompt, num_images_per_prompt, do_classifier_free_guidance)

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        # get the initial completely masked latents unless the user supplied it
        latents_shape = (batch_size, self.transformer.num_latent_pixels)
        if latents is None:
            # Index of the "masked" embedding class that VQ-Diffusion starts from.
            mask_class = self.transformer.num_vector_embeds - 1
            latents = torch.full(latents_shape, mask_class).to(self.device)
        else:
            if latents.shape != latents_shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
            if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
                raise ValueError(
                    "Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
                    f" {self.transformer.num_vector_embeds - 1} (inclusive)."
                )
            latents = latents.to(self.device)

        # set timesteps
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps.to(self.device)

        sample = latents
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the sample if we are doing classifier free guidance
            latent_model_input = torch.cat([sample] * 2) if do_classifier_free_guidance else sample

            # predict the un-noised image
            # model_output == `log_p_x_0`
            model_output = self.transformer(
                latent_model_input, encoder_hidden_states=prompt_embeds, timestep=t
            ).sample

            if do_classifier_free_guidance:
                model_output_uncond, model_output_text = model_output.chunk(2)
                model_output = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
                # Re-normalize the log-probabilities after guidance.
                model_output -= torch.logsumexp(model_output, dim=1, keepdim=True)

            model_output = self.truncate(model_output, truncation_rate)

            # remove `log(0)`'s (`-inf`s)
            model_output = model_output.clamp(-7_0)

            # compute the previous noisy sample x_t -> x_t-1
            sample = self.scheduler.step(model_output, timestep=t, sample=sample, generator=generator).prev_sample

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, t, sample)

        embedding_channels = self.vqvae.config.vq_embed_dim
        embeddings_shape = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
        embeddings = self.vqvae.quantize.get_codebook_entry(sample, shape=embeddings_shape)
        image = self.vqvae.decode(embeddings, force_not_quantize=True).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)

    def truncate(self, log_p_x_0: torch.FloatTensor, truncation_rate: float) -> torch.FloatTensor:
        """Zero out (set to log(0)) the lowest-probability classes whose cumulative mass exceeds `truncation_rate`."""
        sorted_log_p_x_0, indices = torch.sort(log_p_x_0, 1, descending=True)
        sorted_p_x_0 = torch.exp(sorted_log_p_x_0)
        keep_mask = sorted_p_x_0.cumsum(dim=1) < truncation_rate

        # Ensure that at least the largest probability is not zeroed out
        all_true = torch.full_like(keep_mask[:, 0:1, :], True)
        keep_mask = torch.cat((all_true, keep_mask), dim=1)
        keep_mask = keep_mask[:, :-1, :]

        # Undo the sort so the mask aligns with the original class ordering.
        keep_mask = keep_mask.gather(1, indices.argsort(1))

        rv = log_p_x_0.clone()
        rv[~keep_mask] = -torch.inf  # -inf = log(0)
        return rv
# ---- non-code dataset artifact (chunk separator) ----
"""simple docstring"""
import math
class __SCREAMING_SNAKE_CASE :
    """Dense directed graph with Floyd-Warshall all-pairs shortest paths.

    Nodes are 0..n-1; `self.dp[i][j]` holds the current best known distance i -> j.
    """

    def __init__(self, n=0):  # a graph with Node 0,1,...,N-1
        self.n = n
        # adjacency matrix for weight (math.inf = no edge)
        self.w = [[math.inf for j in range(0, n)] for i in range(0, n)]
        # dp[i][j] stores minimum distance from i to j
        self.dp = [[math.inf for j in range(0, n)] for i in range(0, n)]
        for i in range(0, n):
            self.dp[i][i] = 0  # distance from a node to itself is zero

    def add_edge(self, u, v, w):
        """Add a directed edge u -> v with weight w."""
        self.dp[u][v] = w

    def floyd_warshall(self):
        """Relax every pair through every intermediate node k — O(n^3)."""
        for k in range(0, self.n):
            for i in range(0, self.n):
                for j in range(0, self.n):
                    self.dp[i][j] = min(self.dp[i][j], self.dp[i][k] + self.dp[k][j])

    def show_min(self, u, v):
        """Return the shortest distance u -> v computed by `floyd_warshall()`."""
        return self.dp[u][v]


# Public alias: the module-level demo code refers to the class as `Graph`.
Graph = __SCREAMING_SNAKE_CASE
if __name__ == "__main__":
    graph = Graph(5)
    graph.add_edge(0, 2, 9)
    graph.add_edge(0, 4, 1_0)
    graph.add_edge(1, 3, 5)
    graph.add_edge(2, 3, 7)
    graph.add_edge(3, 0, 1_0)
    graph.add_edge(3, 1, 2)
    graph.add_edge(3, 2, 1)
    graph.add_edge(3, 4, 6)
    graph.add_edge(4, 1, 3)
    graph.add_edge(4, 2, 4)
    graph.add_edge(4, 3, 9)
    graph.floyd_warshall()
    # Print the results; the bare calls previously discarded them.
    print(graph.show_min(1, 4))
    print(graph.show_min(0, 3))
# ---- non-code dataset artifact (chunk separator) ----
import shutil
import tempfile
import unittest
from unittest.mock import patch
from transformers import (
DefaultFlowCallback,
IntervalStrategy,
PrinterCallback,
ProgressCallback,
Trainer,
TrainerCallback,
TrainingArguments,
is_torch_available,
)
from transformers.testing_utils import require_torch
if is_torch_available():
from transformers.trainer import DEFAULT_CALLBACKS
from .test_trainer import RegressionDataset, RegressionModelConfig, RegressionPreTrainedModel
class lowerCAmelCase__ ( TrainerCallback ):
    """A `TrainerCallback` that records (in `self.events`) the name of every event it receives, for test assertions."""

    def __init__(self):
        # Chronological list of the callback-event names fired by the Trainer.
        self.events = []

    def on_init_end(self, args, state, control, **kwargs):
        self.events.append("on_init_end")

    def on_train_begin(self, args, state, control, **kwargs):
        self.events.append("on_train_begin")

    def on_train_end(self, args, state, control, **kwargs):
        self.events.append("on_train_end")

    def on_epoch_begin(self, args, state, control, **kwargs):
        self.events.append("on_epoch_begin")

    def on_epoch_end(self, args, state, control, **kwargs):
        self.events.append("on_epoch_end")

    def on_step_begin(self, args, state, control, **kwargs):
        self.events.append("on_step_begin")

    def on_step_end(self, args, state, control, **kwargs):
        self.events.append("on_step_end")

    def on_evaluate(self, args, state, control, **kwargs):
        self.events.append("on_evaluate")

    def on_predict(self, args, state, control, **kwargs):
        self.events.append("on_predict")

    def on_save(self, args, state, control, **kwargs):
        self.events.append("on_save")

    def on_log(self, args, state, control, **kwargs):
        self.events.append("on_log")

    def on_prediction_step(self, args, state, control, **kwargs):
        self.events.append("on_prediction_step")
@require_torch
class lowerCAmelCase__ ( unittest.TestCase ):
def __A ( self : Union[str, Any] ) -> Optional[Any]:
__lowerCamelCase = tempfile.mkdtemp()
def __A ( self : int ) -> List[str]:
shutil.rmtree(self.output_dir )
def __A ( self : Tuple , SCREAMING_SNAKE_CASE__ : List[Any]=0 , SCREAMING_SNAKE_CASE__ : Any=0 , SCREAMING_SNAKE_CASE__ : List[str]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=64 , SCREAMING_SNAKE_CASE__ : Optional[int]=None , SCREAMING_SNAKE_CASE__ : Optional[int]=False , **SCREAMING_SNAKE_CASE__ : Optional[Any] ) -> Union[str, Any]:
# disable_tqdm in TrainingArguments has a flaky default since it depends on the level of logging. We make sure
# its set to False since the tests later on depend on its value.
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionDataset(length=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionModelConfig(a=SCREAMING_SNAKE_CASE__ , b=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = RegressionPreTrainedModel(SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = TrainingArguments(self.output_dir , disable_tqdm=SCREAMING_SNAKE_CASE__ , report_to=[] , **SCREAMING_SNAKE_CASE__ )
return Trainer(
SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , callbacks=SCREAMING_SNAKE_CASE__ , )
def __A ( self : List[Any] , SCREAMING_SNAKE_CASE__ : List[str] , SCREAMING_SNAKE_CASE__ : str ) -> Optional[int]:
self.assertEqual(len(SCREAMING_SNAKE_CASE__ ) , len(SCREAMING_SNAKE_CASE__ ) )
# Order doesn't matter
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
__lowerCamelCase = sorted(SCREAMING_SNAKE_CASE__ , key=lambda SCREAMING_SNAKE_CASE__ : cb.__name__ if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) else cb.__class__.__name__ )
for cba, cba in zip(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
if isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
elif isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(SCREAMING_SNAKE_CASE__ , cba.__class__ )
elif not isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ) and isinstance(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ ):
self.assertEqual(cba.__class__ , SCREAMING_SNAKE_CASE__ )
else:
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] , SCREAMING_SNAKE_CASE__ : Optional[int] ) -> Tuple:
__lowerCamelCase = ['''on_init_end''', '''on_train_begin''']
__lowerCamelCase = 0
__lowerCamelCase = len(trainer.get_eval_dataloader() )
__lowerCamelCase = ['''on_prediction_step'''] * len(trainer.get_eval_dataloader() ) + ['''on_log''', '''on_evaluate''']
for _ in range(trainer.state.num_train_epochs ):
expected_events.append('''on_epoch_begin''' )
for _ in range(SCREAMING_SNAKE_CASE__ ):
step += 1
expected_events += ["on_step_begin", "on_step_end"]
if step % trainer.args.logging_steps == 0:
expected_events.append('''on_log''' )
if trainer.args.evaluation_strategy == IntervalStrategy.STEPS and step % trainer.args.eval_steps == 0:
expected_events += evaluation_events.copy()
if step % trainer.args.save_steps == 0:
expected_events.append('''on_save''' )
expected_events.append('''on_epoch_end''' )
if trainer.args.evaluation_strategy == IntervalStrategy.EPOCH:
expected_events += evaluation_events.copy()
expected_events += ["on_log", "on_train_end"]
return expected_events
def __A ( self : Union[str, Any] ) -> int:
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# Callbacks passed at init are added to the default callbacks
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
expected_callbacks.append(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# TrainingArguments.disable_tqdm controls if use ProgressCallback or PrinterCallback
__lowerCamelCase = self.get_trainer(disable_tqdm=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [PrinterCallback]
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : List[Any] ) -> str:
__lowerCamelCase = DEFAULT_CALLBACKS.copy() + [ProgressCallback]
__lowerCamelCase = self.get_trainer()
# We can add, pop, or remove by class name
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(cb.__class__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
# We can also add, pop, or remove by instance
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
trainer.remove_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.remove(SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer()
__lowerCamelCase = trainer.callback_handler.callbacks[0]
__lowerCamelCase = trainer.pop_callback(SCREAMING_SNAKE_CASE__ )
self.assertEqual(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
trainer.add_callback(SCREAMING_SNAKE_CASE__ )
expected_callbacks.insert(0 , SCREAMING_SNAKE_CASE__ )
self.check_callbacks_equality(trainer.callback_handler.callbacks , SCREAMING_SNAKE_CASE__ )
def __A ( self : Union[str, Any] ) -> Any:
import warnings
# XXX: for now ignore scatter_gather warnings in this test since it's not relevant to what's being tested
warnings.simplefilter(action='''ignore''' , category=SCREAMING_SNAKE_CASE__ )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# Independent log/save/eval
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , logging_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , save_steps=5 )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , eval_steps=5 , evaluation_strategy='''steps''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
__lowerCamelCase = self.get_trainer(callbacks=[MyTestTrainerCallback] , evaluation_strategy='''epoch''' )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# A bit of everything
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback] , logging_steps=3 , save_steps=10 , eval_steps=5 , evaluation_strategy='''steps''' , )
trainer.train()
__lowerCamelCase = trainer.callback_handler.callbacks[-2].events
self.assertEqual(SCREAMING_SNAKE_CASE__ , self.get_expected_events(SCREAMING_SNAKE_CASE__ ) )
# warning should be emitted for duplicated callbacks
with patch('''transformers.trainer_callback.logger.warning''' ) as warn_mock:
__lowerCamelCase = self.get_trainer(
callbacks=[MyTestTrainerCallback, MyTestTrainerCallback] , )
assert str(SCREAMING_SNAKE_CASE__ ) in warn_mock.call_args[0][0]
| 270 | 0 |
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Per-device batch size above which we fall back to gradient accumulation
# (read by training_function below).
MAX_GPU_BATCH_SIZE = 16
# Batch size used for the evaluation dataloader (name reconstructed from the
# upstream Accelerate example — TODO confirm).
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """Download GLUE MRPC, tokenize it, and build train/eval dataloaders.

    Args:
        accelerator: the `Accelerator` coordinating (multi-)device setup.
        batch_size: per-device training batch size.

    Returns:
        ``(train_dataloader, eval_dataloader)``

    NOTE(review): the obfuscated original declared two parameters with the same
    name (a SyntaxError) and bound every result to a throwaway name while the
    body read `tokenizer`, `datasets`, `tokenized_datasets`, ...; names are
    restored from those read sites, and the function name from the call site
    in `training_function`.
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'], )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt', )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True )
    # Eval batch size is 32 in the upstream example (EVAL_BATCH_SIZE); hard-coded
    # here so this function stands alone.
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=32, drop_last=(accelerator.mixed_precision == 'fp8'), )
    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train bert-base-cased on GLUE MRPC with HF Accelerate.

    Args:
        config: dict with 'lr', 'num_epochs', 'seed' and 'batch_size'.
        args: parsed CLI namespace (`cpu`, `mixed_precision`).

    NOTE(review): parameter and local names restored from their read sites
    (`args.cpu`, `config['lr']`, `model.parameters()`, `lr_scheduler.step()`, ...);
    the function name comes from the call in `main`.
    """
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])
    metric = evaluate.load('glue', 'mrpc')
    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE
    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)
    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)
    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)
    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps, )
    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler )
    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F"epoch {epoch}:", eval_metric)
def main():
    """Parse CLI arguments and launch training.

    NOTE(review): function name restored from the `__main__` guard's call;
    `--mixed_precision` defaults reconstructed as ``type=str, default=None``
    per the upstream Accelerate example — confirm.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 145 |
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, BatchEncoding, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece's word-boundary marker character.
SPIECE_UNDERLINE = '▁'

VOCAB_FILES_NAMES = {'vocab_file': 'sentencepiece.bpe.model'}

PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'facebook/mbart-large-en-ro': (
            'https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model'
        ),
        'facebook/mbart-large-cc25': (
            'https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model'
        ),
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'facebook/mbart-large-en-ro': 1024,
    'facebook/mbart-large-cc25': 1024,
}

# fmt: off
# NOTE(review): the obfuscated original clobbered one module-level name six times;
# constant names restored from the tokenizer class's read sites.
FAIRSEQ_LANGUAGE_CODES = ['ar_AR', 'cs_CZ', 'de_DE', 'en_XX', 'es_XX', 'et_EE', 'fi_FI', 'fr_XX', 'gu_IN', 'hi_IN', 'it_IT', 'ja_XX', 'kk_KZ', 'ko_KR', 'lt_LT', 'lv_LV', 'my_MM', 'ne_NP', 'nl_XX', 'ro_RO', 'ru_RU', 'si_LK', 'tr_TR', 'vi_VN', 'zh_CN']
class MBartTokenizer(PreTrainedTokenizer):
    """Construct an MBART tokenizer based on SentencePiece (BPE).

    Sequences are encoded with no prefix tokens and a ``</s> [lang_code]`` suffix
    that depends on the currently selected source/target language.

    NOTE(review): the obfuscated original used an undefined base class, gave every
    method the same name (so the class could not even be created — the
    ``@src_lang.setter`` referenced a missing property), and bound attributes to
    throwaway names while later code read ``self.sp_model``,
    ``self.lang_code_to_id``, etc. Names are restored from those read sites and
    the imported ``PreTrainedTokenizer`` base.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    model_input_names = ['input_ids', 'attention_mask']

    # Special-token ids wrapped around encoded sequences; set by
    # set_src_lang_special_tokens / set_tgt_lang_special_tokens.
    prefix_tokens: List[int] = []
    suffix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        tokenizer_file=None,
        src_lang=None,
        tgt_lang=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        additional_special_tokens=None,
        **kwargs,
    ) -> None:
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, tokenizer_file=tokenizer_file, src_lang=src_lang, tgt_lang=tgt_lang, additional_special_tokens=additional_special_tokens, sp_model_kwargs=self.sp_model_kwargs, **kwargs, )
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(str(vocab_file))
        self.vocab_file = vocab_file
        # Original fairseq vocab and spm vocab must be "aligned":
        # Vocab    |    0    |    1    |   2    |    3    |  4  |  5  |  6  |   7   |   8   |  9
        # -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
        # fairseq  | '<s>'   | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's'   | '▁de' | '-'
        # spm      | '<unk>' | '<s>'   | '</s>' | ','     | '.' | '▁' | 's' | '▁de' | '-'   | '▁a'
        # Mimic fairseq token-to-id alignment for the first 4 token
        self.fairseq_tokens_to_ids = {'<s>': 0, '<pad>': 1, '</s>': 2, '<unk>': 3}
        # The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
        self.fairseq_offset = 1
        self.sp_model_size = len(self.sp_model)
        self.lang_code_to_id = {
            code: self.sp_model_size + i + self.fairseq_offset for i, code in enumerate(FAIRSEQ_LANGUAGE_CODES)
        }
        self.id_to_lang_code = {v: k for k, v in self.lang_code_to_id.items()}
        self.fairseq_tokens_to_ids['<mask>'] = len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset
        self.fairseq_tokens_to_ids.update(self.lang_code_to_id)
        self.fairseq_ids_to_tokens = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
        self._additional_special_tokens = list(self.lang_code_to_id.keys())
        if additional_special_tokens is not None:
            # Only add those special tokens if they are not already there.
            self._additional_special_tokens.extend(
                [t for t in additional_special_tokens if t not in self._additional_special_tokens])
        self._src_lang = src_lang if src_lang is not None else 'en_XX'
        self.cur_lang_code_id = self.lang_code_to_id[self._src_lang]
        self.tgt_lang = tgt_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def __getstate__(self) -> Dict:
        # The SentencePiece processor is not picklable; ship the serialized proto instead.
        state = self.__dict__.copy()
        state['sp_model'] = None
        state['sp_model_proto'] = self.sp_model.serialized_model_proto()
        return state

    def __setstate__(self, d) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.LoadFromSerializedProto(self.sp_model_proto)

    @property
    def vocab_size(self) -> int:
        return len(self.sp_model) + len(self.lang_code_to_id) + self.fairseq_offset + 1  # Plus 1 for the mask token

    @property
    def src_lang(self) -> str:
        return self._src_lang

    @src_lang.setter
    def src_lang(self, new_src_lang: str) -> None:
        self._src_lang = new_src_lang
        self.set_src_lang_special_tokens(self._src_lang)

    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False) -> List[int]:
        """Return a mask with 1 for special tokens and 0 for sequence tokens."""
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1] * len(self.suffix_tokens)
        if token_ids_1 is None:
            return prefix_ones + ([0] * len(token_ids_0)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return self.prefix_tokens + token_ids_0 + self.suffix_tokens
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_0 + token_ids_1 + self.suffix_tokens

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None) -> List[int]:
        """mBART does not use token type ids; return a list of zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def _build_translation_inputs(self, raw_inputs, return_tensors, src_lang, tgt_lang, **extra_kwargs):
        """Used by translation pipelines: tokenize and force the target language id as BOS."""
        if src_lang is None or tgt_lang is None:
            raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model')
        self.src_lang = src_lang
        inputs = self(raw_inputs, add_special_tokens=True, return_tensors=return_tensors, **extra_kwargs)
        tgt_lang_id = self.convert_tokens_to_ids(tgt_lang)
        inputs['forced_bos_token_id'] = tgt_lang_id
        return inputs

    def get_vocab(self) -> Dict:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token: str) -> int:
        """Converts a token (str) to an id using the vocab."""
        if token in self.fairseq_tokens_to_ids:
            return self.fairseq_tokens_to_ids[token]
        spm_id = self.sp_model.PieceToId(token)
        # Need to return unknown token if the SP model returned 0
        return spm_id + self.fairseq_offset if spm_id else self.unk_token_id

    def _convert_id_to_token(self, index: int) -> str:
        """Converts an index (integer) to a token (str) using the vocab."""
        if index in self.fairseq_ids_to_tokens:
            return self.fairseq_ids_to_tokens[index]
        return self.sp_model.IdToPiece(index - self.fairseq_offset)

    def convert_tokens_to_string(self, tokens) -> str:
        out_string = ''.join(tokens).replace(SPIECE_UNDERLINE, ' ').strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)

    def prepare_seq2seq_batch(self, src_texts, src_lang: str = "en_XX", tgt_texts=None, tgt_lang: str = "ro_RO", **kwargs) -> BatchEncoding:
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang
        return super().prepare_seq2seq_batch(src_texts, tgt_texts, **kwargs)

    def _switch_to_input_mode(self):
        return self.set_src_lang_special_tokens(self.src_lang)

    def _switch_to_target_mode(self):
        return self.set_tgt_lang_special_tokens(self.tgt_lang)

    def set_src_lang_special_tokens(self, src_lang) -> None:
        """Reset the special tokens to the source lang setting. No prefix and suffix=[eos, src_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[src_lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]

    def set_tgt_lang_special_tokens(self, lang) -> None:
        """Reset the special tokens to the target lang setting. No prefix and suffix=[eos, tgt_lang_code]."""
        self.cur_lang_code = self.lang_code_to_id[lang]
        self.prefix_tokens = []
        self.suffix_tokens = [self.eos_token_id, self.cur_lang_code]
| 145 | 1 |
import random
def UpperCamelCase(vertices_number, probability, directed=False):
    """Generate a random graph (Erdos-Renyi style) on `vertices_number` nodes.

    Each unordered pair of distinct nodes becomes an edge independently with the
    given probability; when `directed` is False the adjacency lists are kept
    symmetric. probability >= 1 yields a complete graph, probability <= 0 an
    edgeless one.

    NOTE(review): the original declared three parameters all named `_A` (a
    SyntaxError) while the body read `probability`, `graph` and `directed`;
    parameter names are restored from those read sites.
    """
    graph: dict = {i: [] for i in range(vertices_number)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        # Inlined complete graph: every ordered pair (i, j) with i != j is an edge.
        return {i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)}
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # For each couple of nodes, add an edge between u and v when the randomly
    # generated number falls below `probability`.
    for i in range(vertices_number):
        for j in range(i + 1, vertices_number):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add the reverse edge j -> i as well
                    graph[j].append(i)
    return graph
def complete_graph(vertices_number):
    """Return the complete graph on `vertices_number` nodes as an adjacency dict.

    NOTE(review): renamed from the obfuscated duplicate `UpperCamelCase` — the
    random-graph generator above calls this helper as `complete_graph`, and the
    duplicate name silently shadowed the generator.
    """
    return {
        i: [j for j in range(vertices_number) if i != j] for i in range(vertices_number)
    }


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 342 |
import unittest
import numpy as np
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for DPT image-processor tests and builds the
    kwargs dict used to instantiate `DPTImageProcessor`.

    NOTE(review): the obfuscated original dropped every `self.` assignment (so
    `prepare_image_processor_dict` read unset attributes) and shared its class
    name with the test class below; the name is restored from the reference in
    the test class's `setUp`.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=None,
        image_std=None,
    ):
        # Avoid mutable list defaults; [0.5, 0.5, 0.5] is the documented default.
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else [0.5, 0.5, 0.5]
        self.image_std = image_std if image_std is not None else [0.5, 0.5, 0.5]

    def prepare_image_processor_dict(self):
        return {
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_normalize": self.do_normalize,
            "do_resize": self.do_resize,
            "size": self.size,
        }
@require_torch
@require_vision
class DPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """Tests for `DPTImageProcessor`: attribute presence, dict round-trip, and
    that PIL / numpy / torch inputs all produce correctly shaped pixel_values.

    NOTE(review): the obfuscated original gave every method the same name (only
    the last would survive) and dropped the `self.` assignment in `setUp`;
    attribute names are restored from their read sites
    (`self.image_processor_tester`, `self.image_processing_class`).
    """

    image_processing_class = DPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def _expected_shape(self, batch_size):
        # (batch, channels, height, width) as produced with return_tensors="pt".
        return (
            batch_size,
            self.image_processor_tester.num_channels,
            self.image_processor_tester.size["height"],
            self.image_processor_tester.size["width"],
        )

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)
        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(1))
        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(encoded_images.shape, self._expected_shape(self.image_processor_tester.batch_size))
| 342 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_tf_available,
is_torch_available,
)
# Lazy-import structure for the Speech2Text model family.
# NOTE(review): the obfuscated original reassigned one name wholesale for every
# optional section (so `_import_structure` was never built) and discarded the
# `_LazyModule`; restored to the standard transformers lazy-init pattern, and
# the mangled `SpeechaText*` class names restored to `Speech2Text*`.
_import_structure = {
    "configuration_speech_to_text": ["SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "Speech2TextConfig"],
    "processing_speech_to_text": ["Speech2TextProcessor"],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_speech_to_text"] = ["Speech2TextTokenizer"]

try:
    if not is_speech_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_speech_to_text"] = ["Speech2TextFeatureExtractor"]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_speech_to_text"] = [
        "TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFSpeech2TextForConditionalGeneration",
        "TFSpeech2TextModel",
        "TFSpeech2TextPreTrainedModel",
    ]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_speech_to_text"] = [
        "SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "Speech2TextForConditionalGeneration",
        "Speech2TextModel",
        "Speech2TextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_speech_to_text import SPEECH_TO_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, Speech2TextConfig
    from .processing_speech_to_text import Speech2TextProcessor

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_speech_to_text import Speech2TextTokenizer

    try:
        if not is_speech_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_speech_to_text import Speech2TextFeatureExtractor

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_speech_to_text import (
            TF_SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFSpeech2TextForConditionalGeneration,
            TFSpeech2TextModel,
            TFSpeech2TextPreTrainedModel,
        )

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_speech_to_text import (
            SPEECH_TO_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Speech2TextForConditionalGeneration,
            Speech2TextModel,
            Speech2TextPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first access.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 3 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
from transformers import LayoutLMvaImageProcessor
class LayoutLMvaImageProcessingTester(unittest.TestCase):
    """Holds the hyper-parameters for LayoutLMv3 image-processor tests and builds
    the kwargs dict used to instantiate `LayoutLMvaImageProcessor`.

    NOTE(review): the obfuscated original dropped every `self.` assignment, so
    `prepare_image_processor_dict` read unset attributes; the class name is
    restored from the reference in the test class's `setUp` below.
    """

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        self.size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class _lowerCAmelCase ( A__ , unittest.TestCase ):
    """Tests for ``LayoutLMv3ImageProcessor``: resize behaviour and optional
    Tesseract OCR output (words + normalized boxes).

    NOTE(review): obfuscation artifacts kept byte-identical below — every
    method is named ``lowerCAmelCase`` (later defs shadow earlier ones),
    results are bound to a bare local ``snake_case`` instead of ``self.<attr>``,
    and the names ``LayoutLMvaImageProcessingTester``, ``image_processor``,
    ``image_inputs``, ``encoding``, ``encoded_images``, ``ds`` they later read
    are never defined as written.  Only comments/docstrings were added.
    """
    # Processor class under test; ``None`` disables the suite when pytesseract is absent.
    snake_case_ = LayoutLMvaImageProcessor if is_pytesseract_available() else None
    # setUp-equivalent: builds the shared tester fixture.
    def lowerCAmelCase ( self : int )-> Tuple:
        snake_case = LayoutLMvaImageProcessingTester(self )
    @property
    def lowerCAmelCase ( self : Tuple )-> Tuple:
        # kwargs used to instantiate the processor in each test
        return self.image_processor_tester.prepare_image_processor_dict()
    # Verifies the processor exposes the expected configuration attributes.
    def lowerCAmelCase ( self : Union[str, Any] )-> Any:
        snake_case = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(__snake_case , """do_resize""" ) )
        self.assertTrue(hasattr(__snake_case , """size""" ) )
        self.assertTrue(hasattr(__snake_case , """apply_ocr""" ) )
    # ``from_dict`` round-trip, including the int -> {"height", "width"} shortcut.
    def lowerCAmelCase ( self : List[str] )-> List[Any]:
        snake_case = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"""height""": 18, """width""": 18} )
        snake_case = self.image_processing_class.from_dict(self.image_processor_dict , size=42 )
        self.assertEqual(image_processor.size , {"""height""": 42, """width""": 42} )
    # Intentionally empty (placeholder inherited test slot).
    def lowerCAmelCase ( self : Dict )-> Union[str, Any]:
        pass
    # PIL input: checks output tensor shapes and that OCR words/boxes are returned.
    def lowerCAmelCase ( self : Tuple )-> Dict:
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case )
        for image in image_inputs:
            self.assertIsInstance(__snake_case , Image.Image )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        self.assertIsInstance(encoding.words , __snake_case )
        self.assertIsInstance(encoding.boxes , __snake_case )
        # Test batched
        snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    # numpy input: same shape checks as the PIL case.
    def lowerCAmelCase ( self : int )-> str:
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , numpify=__snake_case )
        for image in image_inputs:
            self.assertIsInstance(__snake_case , np.ndarray )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    # torch input: same shape checks as the PIL case.
    def lowerCAmelCase ( self : List[Any] )-> Optional[Any]:
        # Initialize image_processing
        snake_case = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        snake_case = prepare_image_inputs(self.image_processor_tester , equal_resolution=__snake_case , torchify=__snake_case )
        for image in image_inputs:
            self.assertIsInstance(__snake_case , torch.Tensor )
        # Test not batched input
        snake_case = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
        # Test batched
        snake_case = image_processing(__snake_case , return_tensors="""pt""" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["""height"""],
                self.image_processor_tester.size["""width"""],
            ) , )
    # Integration test against a fixture document: exact OCR words/boxes
    # (obtained with Tesseract 4.1.1) and the apply_ocr=False path.
    def lowerCAmelCase ( self : int )-> List[Any]:
        # with apply_OCR = True
        snake_case = LayoutLMvaImageProcessor()
        from datasets import load_dataset
        snake_case = load_dataset("""hf-internal-testing/fixtures_docvqa""" , split="""test""" )
        snake_case = Image.open(ds[0]["""file"""] ).convert("""RGB""" )
        snake_case = image_processing(__snake_case , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        snake_case = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """“Introductory""", """Remarks”""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", 
        """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]]  # noqa: E231
        snake_case = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 
        3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76, 6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 
        4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]]  # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words , __snake_case )
        self.assertListEqual(encoding.boxes , __snake_case )
        # with apply_OCR = False
        snake_case = LayoutLMvaImageProcessor(apply_ocr=__snake_case )
        snake_case = image_processing(__snake_case , return_tensors="""pt""" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_24, 2_24) )
# (stray dataset table-delimiter line — not part of the original source)
from __future__ import annotations
from random import random
class _snake_case :
def __init__( self , _lowerCamelCase = None ):
a :List[Any] = value
a :int = random()
a :Node | None = None
a :Node | None = None
def __repr__( self ):
from pprint import pformat
if self.left is None and self.right is None:
return F'''\'{self.value}: {self.prior:.5}\''''
else:
return pformat(
{F'''{self.value}: {self.prior:.5}''': (self.left, self.right)} , indent=1 )
def __str__( self ):
a :Union[str, Any] = str(self.value ) + ''' '''
a :Union[str, Any] = str(self.left or '''''' )
a :Optional[Any] = str(self.right or '''''' )
return value + left + right
def __lowerCamelCase ( root , value ):
    """Split a treap into ``(left, right)`` around ``value``: ``left`` keeps
    every node with value < ``value``, ``right`` keeps the rest.

    Fixed: the obfuscated original unpacked both results of the recursive
    call into the same throwaway name, never re-linked the child pointer,
    and returned the undefined locals ``left``/``right``.
    """
    if root is None:  # None tree is split into 2 Nones
        return None, None
    elif root.value is None:
        return None, None
    else:
        if value < root.value:
            # Split point lies inside the left subtree.
            left, root.left = split(root.left , value )
            return left, root
        else:
            # Split point lies inside the right subtree.
            root.right, right = split(root.right , value )
            return root, right


# Sibling treap functions refer to this function by its algorithmic name.
split = __lowerCamelCase
def __lowerCamelCase ( left , right ):
    """Merge two treaps where every value in ``left`` <= every value in
    ``right``, choosing roots by heap priority.

    Fixed: the obfuscated original dropped the merged subtree into a
    throwaway local instead of reattaching it as a child.
    """
    if (not left) or (not right):  # If one node is None, return the other
        return left or right
    elif left.prior < right.prior:
        # ``left`` wins the heap property and stays the root.
        left.right = merge(left.right , right )
        return left
    else:
        right.left = merge(left , right.left )
        return right


# Sibling treap functions refer to this function by its algorithmic name.
merge = __lowerCamelCase
def __lowerCamelCase ( root , value ):
    """Insert ``value`` into the treap rooted at ``root``; return the new root.

    Fixed: the obfuscated original constructed an undefined ``Node`` (the
    node class in this file is bound to ``_snake_case``) and unpacked the
    split into a single throwaway name.
    """
    node = _snake_case(value )
    left, right = split(root , value )
    return merge(merge(left , node ) , right )


# ``interact_treap`` refers to this function by its algorithmic name.
insert = __lowerCamelCase
def __lowerCamelCase ( root , value ):
    """Remove every node whose value equals ``value``; return the new root.

    Fixed: the obfuscated original unpacked both split results into one
    throwaway name, losing the subtrees it then needed to merge.
    """
    # Cut just below ``value``, then at ``value``: the middle piece
    # (all nodes equal to ``value``) is discarded.
    left, right = split(root , value - 1 )
    _, right = split(right , value )
    return merge(left , right )


# ``interact_treap`` refers to this function by its algorithmic name.
erase = __lowerCamelCase
def __lowerCamelCase ( root ):
    """Print the treap's values in sorted (in-order) sequence, comma separated.

    Fixed: the recursive calls reference the algorithmic name ``inorder``,
    which the obfuscated original never bound (NameError on any non-empty
    tree); the alias below restores it.
    """
    if not root:  # None
        return
    else:
        inorder(root.left )
        print(root.value , end=''',''' )
        inorder(root.right )


# Bind the algorithmic name used by the recursive calls above.
inorder = __lowerCamelCase
def __lowerCamelCase ( root , args ):
    """Apply a whitespace-separated command string to the treap and return the
    (possibly new) root.  ``+N`` inserts N, ``-N`` erases all N; anything
    else prints ``Unknown command``.

    Fixed: the obfuscated original bound the updated tree to a throwaway
    local instead of ``root``, so every command was silently lost.
    """
    for arg in args.split():
        if arg[0] == "+":
            root = insert(root , int(arg[1:] ) )
        elif arg[0] == "-":
            root = erase(root , int(arg[1:] ) )
        else:
            print('''Unknown command''' )
    return root


# ``main`` refers to this function by its algorithmic name.
interact_treap = __lowerCamelCase
def __lowerCamelCase ( ):
    """Tiny REPL: read command lines until ``q``, showing the treap after each.

    Fixed: the obfuscated original never bound ``args`` (the loop condition
    read an undefined name) and discarded the updated root.
    """
    root = None
    print(
        '''enter numbers to create a tree, + value to add value into treap, '''
        '''- value to erase all nodes with value. \'q\' to quit. ''' )
    args = input()
    while args != "q":
        root = interact_treap(root , args )
        print(root )
        args = input()
    print('''good by!''' )


# The entry-point guard below refers to this function by its algorithmic name.
main = __lowerCamelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
# (stray dataset table-delimiter line — not part of the original source)
import math
import string

import numpy
def __lowerCamelCase ( UpperCAmelCase_ : int , UpperCAmelCase_ : int ):
"""simple docstring"""
return b if a == 0 else greatest_common_divisor(b % a , UpperCAmelCase_ )
class _snake_case :
SCREAMING_SNAKE_CASE__ = string.ascii_uppercase + string.digits
# This cipher takes alphanumerics into account
# i.e. a total of 36 characters
# take x and return x % len(key_string)
SCREAMING_SNAKE_CASE__ = numpy.vectorize(lambda _snake_case : x % 36 )
SCREAMING_SNAKE_CASE__ = numpy.vectorize(_snake_case )
def __init__( self , _lowerCamelCase ):
a :List[Any] = self.modulus(_lowerCamelCase ) # mod36 calc's on the encrypt key
self.check_determinant() # validate the determinant of the encryption key
a :int = encrypt_key.shape[0]
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string.index(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
return self.key_string[round(_lowerCamelCase )]
def SCREAMING_SNAKE_CASE__ ( self ):
a :str = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :Any = det % len(self.key_string )
a :Dict = len(self.key_string )
if greatest_common_divisor(_lowerCamelCase , len(self.key_string ) ) != 1:
a :int = (
F'''determinant modular {req_l} of encryption key({det}) '''
F'''is not co prime w.r.t {req_l}.\nTry another key.'''
)
raise ValueError(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Optional[Any] = [char for char in text.upper() if char in self.key_string]
a :List[str] = chars[-1]
while len(_lowerCamelCase ) % self.break_key != 0:
chars.append(_lowerCamelCase )
return "".join(_lowerCamelCase )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :Dict = self.process_text(text.upper() )
a :List[str] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :int = text[i : i + self.break_key]
a :Optional[int] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :Union[str, Any] = numpy.array([vec] ).T
a :str = self.modulus(self.encrypt_key.dot(_lowerCamelCase ) ).T.tolist()[
0
]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_encrypted )
encrypted += encrypted_batch
return encrypted
def SCREAMING_SNAKE_CASE__ ( self ):
a :List[Any] = round(numpy.linalg.det(self.encrypt_key ) )
if det < 0:
a :int = det % len(self.key_string )
a :Tuple = None
for i in range(len(self.key_string ) ):
if (det * i) % len(self.key_string ) == 1:
a :Tuple = i
break
a :List[str] = (
det_inv
* numpy.linalg.det(self.encrypt_key )
* numpy.linalg.inv(self.encrypt_key )
)
return self.to_int(self.modulus(_lowerCamelCase ) )
def SCREAMING_SNAKE_CASE__ ( self , _lowerCamelCase ):
a :List[Any] = self.make_decrypt_key()
a :str = self.process_text(text.upper() )
a :List[Any] = ''''''
for i in range(0 , len(_lowerCamelCase ) - self.break_key + 1 , self.break_key ):
a :Optional[Any] = text[i : i + self.break_key]
a :List[Any] = [self.replace_letters(_lowerCamelCase ) for char in batch]
a :str = numpy.array([vec] ).T
a :Dict = self.modulus(decrypt_key.dot(_lowerCamelCase ) ).T.tolist()[0]
a :List[Any] = ''''''.join(
self.replace_digits(_lowerCamelCase ) for num in batch_decrypted )
decrypted += decrypted_batch
return decrypted
def __lowerCamelCase ( ):
    """Interactive driver: read a key matrix, then encrypt or decrypt user text.

    Fixed: the obfuscated original bound everything to throwaway locals and
    called an undefined ``HillCipher`` (the class is bound to ``_snake_case``).
    """
    n = int(input('''Enter the order of the encryption key: ''' ) )
    hill_matrix = []
    print('''Enter each row of the encryption key with space separated integers''' )
    for _ in range(n ):
        row = [int(x ) for x in input().split()]
        hill_matrix.append(row )
    hc = _snake_case(numpy.array(hill_matrix ) )
    print('''Would you like to encrypt or decrypt some text? (1 or 2)''' )
    option = input('''\n1. Encrypt\n2. Decrypt\n''' )
    if option == "1":
        text_e = input('''What text would you like to encrypt?: ''' )
        print('''Your encrypted text is:''' )
        print(hc.encrypt(text_e ) )
    elif option == "2":
        text_d = input('''What text would you like to decrypt?: ''' )
        print('''Your decrypted text is:''' )
        print(hc.decrypt(text_d ) )


# The entry-point guard below refers to this function by its conventional name.
main = __lowerCamelCase

if __name__ == "__main__":
    import doctest

    doctest.testmod()
    main()
# (stray dataset table-delimiter line — not part of the original source)
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowercase ( unittest.TestCase ):
    """Tests for ``transformers.utils.logging``: verbosity levels, per-module
    loggers, ``TRANSFORMERS_VERBOSITY`` env overrides, and advisory warnings.

    NOTE(review): obfuscation artifacts kept byte-identical below — every
    method shares the name ``lowerCAmelCase__`` (later defs shadow earlier
    ones) and results are bound to a throwaway ``UpperCamelCase__`` local,
    so names such as ``logger``, ``level_origin``, ``msg``, ``env_level_str``
    are never defined as written.  Only comments/docstrings were added.
    """
    # Round-trips every verbosity setter against get_verbosity().
    def lowerCAmelCase__ ( self ):
        '''set_verbosity_* must be reflected by both get_verbosity() and the root logger.'''
        UpperCamelCase__ :int = logging.get_logger()
        # the current default level is logging.WARNING
        UpperCamelCase__ :List[Any] = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(UpperCamelCase_ )
    # Checks that set_verbosity_error() suppresses module-logger warnings.
    def lowerCAmelCase__ ( self ):
        '''Warnings appear at WARNING verbosity, disappear at ERROR, reappear at WARNING.'''
        UpperCamelCase__ :Union[str, Any] = logging.get_verbosity()
        UpperCamelCase__ :Union[str, Any] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        UpperCamelCase__ :Optional[Any] = '''Testing 1, 2, 3'''
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(UpperCamelCase_ ) as cl:
                logger.warning(UpperCamelCase_ )
            self.assertEqual(cl.out , msg + '''\n''' )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(UpperCamelCase_ ) as cl:
            logger.warning(UpperCamelCase_ )
        self.assertEqual(cl.out , '''''' )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(UpperCamelCase_ ) as cl:
            logger.warning(UpperCamelCase_ )
        self.assertEqual(cl.out , msg + '''\n''' )
        # restore to the original level
        logging.set_verbosity(UpperCamelCase_ )
    # A valid TRANSFORMERS_VERBOSITY value must drive the internal verbosity.
    @mockenv(TRANSFORMERS_VERBOSITY='''error''' )
    def lowerCAmelCase__ ( self ):
        '''TRANSFORMERS_VERBOSITY=error must map to logging.log_levels["error"].'''
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        UpperCamelCase__ :Tuple = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        UpperCamelCase__ :Any = os.getenv('''TRANSFORMERS_VERBOSITY''' , UpperCamelCase_ )
        UpperCamelCase__ :Optional[Any] = logging.log_levels[env_level_str]
        UpperCamelCase__ :int = logging.get_verbosity()
        self.assertEqual(
            UpperCamelCase_ , UpperCamelCase_ , F'''TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}''' , )
        # restore to the original level
        UpperCamelCase__ :Union[str, Any] = ''''''
        transformers.utils.logging._reset_library_root_logger()
    # An invalid TRANSFORMERS_VERBOSITY value must only emit a warning.
    @mockenv(TRANSFORMERS_VERBOSITY='''super-error''' )
    def lowerCAmelCase__ ( self ):
        '''An unknown TRANSFORMERS_VERBOSITY value is reported, not fatal.'''
        transformers.utils.logging._reset_library_root_logger()
        UpperCamelCase__ :Dict = logging.logging.getLogger()
        with CaptureLogger(UpperCamelCase_ ) as cl:
            # this action activates the env var
            logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        self.assertIn('''Unknown option TRANSFORMERS_VERBOSITY=super-error''' , cl.out )
        # no need to restore as nothing was changed
    # TRANSFORMERS_NO_ADVISORY_WARNINGS toggles logger.warning_advice on/off.
    def lowerCAmelCase__ ( self ):
        '''warning_advice is silenced iff TRANSFORMERS_NO_ADVISORY_WARNINGS is set.'''
        transformers.utils.logging._reset_library_root_logger()
        UpperCamelCase__ :Optional[int] = logging.get_logger('''transformers.models.bart.tokenization_bart''' )
        UpperCamelCase__ :int = '''Testing 1, 2, 3'''
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''1''' ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(UpperCamelCase_ ) as cl:
                logger.warning_advice(UpperCamelCase_ )
            self.assertEqual(cl.out , '''''' )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS='''''' ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(UpperCamelCase_ ) as cl:
                logger.warning_advice(UpperCamelCase_ )
            self.assertEqual(cl.out , msg + '''\n''' )
def a ( ) -> None:
    """Smoke-test the global progress-bar switches exposed by huggingface_hub.

    Fixed: the original carried stray non-Python tokens fused onto its last
    line (a SyntaxError) and a ``-> str`` annotation although nothing is
    returned.
    """
    disable_progress_bar()
    assert are_progress_bars_disabled()
    enable_progress_bar()
    assert not are_progress_bars_disabled()
'''Lazy import structure for the Wav2Vec2-Phoneme tokenizer module.'''
from typing import TYPE_CHECKING

from ...utils import _LazyModule

# Maps submodule name -> public names it provides.
__snake_case = {'''tokenization_wav2vec2_phoneme''': ['''Wav2Vec2PhonemeCTCTokenizer''']}

if TYPE_CHECKING:
    from .tokenization_wavaveca_phoneme import WavaVecaPhonemeCTCTokenizer
else:
    import sys

    # Fixed: the original passed an undefined ``_import_structure`` here;
    # the structure dict defined above is what _LazyModule needs.
    __snake_case = _LazyModule(__name__, globals()['''__file__'''], __snake_case, module_spec=__spec__)
'''simple docstring'''
import argparse
import json
import os
import torch
from transformers import LukeConfig, LukeModel, LukeTokenizer, RobertaTokenizer
from transformers.tokenization_utils_base import AddedToken
@torch.no_grad()
def _A (checkpoint_path , metadata_path , entity_vocab_path , pytorch_dump_folder_path , model_size ) -> None:
    """Convert an original LUKE checkpoint into a HF ``LukeModel`` + tokenizer.

    Reconstructed: the obfuscated original named every parameter identically
    (a SyntaxError) and collapsed every local to ``_a`` while later reading
    the real names.  Also fixed the inverted entity-hidden-state shape check
    (it used ``!=`` inside ``not (...)``, raising exactly when shapes matched).

    :param checkpoint_path: path to the original pytorch_model.bin
    :param metadata_path: metadata.json describing the model config
    :param entity_vocab_path: TSV entity vocabulary
    :param pytorch_dump_folder_path: output directory
    :param model_size: "base" or "large" (selects the expected output slices)
    """
    # Load configuration defined in the metadata file
    with open(metadata_path ) as metadata_file:
        metadata = json.load(metadata_file )
    config = LukeConfig(use_entity_aware_attention=True , **metadata['model_config'] )

    # Load in the weights from the checkpoint_path
    state_dict = torch.load(checkpoint_path , map_location='cpu' )

    # Load the entity vocab file
    entity_vocab = load_entity_vocab(entity_vocab_path )

    tokenizer = RobertaTokenizer.from_pretrained(metadata['model_config']['bert_model_name'] )

    # Add special tokens to the token vocabulary for downstream tasks
    entity_token_one = AddedToken('<ent>' , lstrip=False , rstrip=False )
    entity_token_two = AddedToken('<ent2>' , lstrip=False , rstrip=False )
    tokenizer.add_special_tokens({'additional_special_tokens': [entity_token_one, entity_token_two]} )
    config.vocab_size += 2

    print(f'Saving tokenizer to {pytorch_dump_folder_path}' )
    tokenizer.save_pretrained(pytorch_dump_folder_path )

    with open(os.path.join(pytorch_dump_folder_path , LukeTokenizer.vocab_files_names['entity_vocab_file'] ) , 'w' ) as f:
        json.dump(entity_vocab , f )

    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path )

    # Initialize the embeddings of the special tokens from "@" and "#"
    word_emb = state_dict['embeddings.word_embeddings.weight']
    ent_emb = word_emb[tokenizer.convert_tokens_to_ids(['@'] )[0]].unsqueeze(0 )
    enta_emb = word_emb[tokenizer.convert_tokens_to_ids(['#'] )[0]].unsqueeze(0 )
    state_dict['embeddings.word_embeddings.weight'] = torch.cat([word_emb, ent_emb, enta_emb] )

    # Initialize the query layers of the entity-aware self-attention mechanism
    for layer_index in range(config.num_hidden_layers ):
        for matrix_name in ["query.weight", "query.bias"]:
            prefix = f'encoder.layer.{layer_index}.attention.self.'
            state_dict[prefix + 'w2e_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2w_' + matrix_name] = state_dict[prefix + matrix_name]
            state_dict[prefix + 'e2e_' + matrix_name] = state_dict[prefix + matrix_name]

    # Initialize the embedding of the [MASK2] entity using that of the [MASK] entity for downstream tasks
    entity_emb = state_dict['entity_embeddings.entity_embeddings.weight']
    entity_emb[entity_vocab['[MASK2]']] = entity_emb[entity_vocab['[MASK]']]

    model = LukeModel(config=config ).eval()

    missing_keys, unexpected_keys = model.load_state_dict(state_dict , strict=False )
    if not (len(missing_keys ) == 1 and missing_keys[0] == "embeddings.position_ids"):
        raise ValueError(f'Missing keys {", ".join(missing_keys )}. Expected only missing embeddings.position_ids' )
    if not (all(key.startswith('entity_predictions' ) or key.startswith('lm_head' ) for key in unexpected_keys )):
        raise ValueError(
            'Unexpected keys'
            f' {", ".join([key for key in unexpected_keys if not (key.startswith("entity_predictions" ) or key.startswith("lm_head" ))] )}' )

    # Check outputs
    tokenizer = LukeTokenizer.from_pretrained(pytorch_dump_folder_path , task='entity_classification' )

    text = (
        'Top seed Ana Ivanovic said on Thursday she could hardly believe her luck as a fortuitous netcord helped the'
        ' new world number one avoid a humiliating second- round exit at Wimbledon .'
    )
    span = (39, 42)
    encoding = tokenizer(text , entity_spans=[span] , add_prefix_space=True , return_tensors='pt' )

    outputs = model(**encoding )

    # Verify word hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 42, 10_24) )
        expected_slice = torch.tensor(
            [[0.0_1_3_3, 0.0_8_6_5, 0.0_0_9_5], [0.3_0_9_3, -0.2_5_7_6, -0.7_4_1_8], [-0.1_7_2_0, -0.2_1_1_7, -0.2_8_6_9]] )
    else:  # base
        expected_shape = torch.Size((1, 42, 7_68) )
        expected_slice = torch.tensor([[0.0_0_3_7, 0.1_3_6_8, -0.0_0_9_1], [0.1_0_9_9, 0.3_3_2_9, -0.1_0_9_5], [0.0_7_6_5, 0.5_3_3_5, 0.1_1_7_9]] )

    if not (outputs.last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.last_hidden_state.shape is {outputs.last_hidden_state.shape}, Expected shape is {expected_shape}' )
    if not torch.allclose(outputs.last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Verify entity hidden states
    if model_size == "large":
        expected_shape = torch.Size((1, 1, 10_24) )
        expected_slice = torch.tensor([[0.0_4_6_6, -0.0_1_0_6, -0.0_1_7_9]] )
    else:  # base
        expected_shape = torch.Size((1, 1, 7_68) )
        expected_slice = torch.tensor([[0.1_4_5_7, 0.1_0_4_4, 0.0_1_7_4]] )

    # BUG FIX: the original compared with ``!=`` here, raising when shapes matched.
    if not (outputs.entity_last_hidden_state.shape == expected_shape):
        raise ValueError(
            f'Outputs.entity_last_hidden_state.shape is {outputs.entity_last_hidden_state.shape}, Expected shape is'
            f' {expected_shape}' )
    if not torch.allclose(outputs.entity_last_hidden_state[0, :3, :3] , expected_slice , atol=1E-4 ):
        raise ValueError

    # Finally, save our PyTorch model and tokenizer
    print('Saving PyTorch model to {}'.format(pytorch_dump_folder_path ) )
    model.save_pretrained(pytorch_dump_folder_path )


# The CLI block below refers to this converter by its descriptive name.
convert_luke_checkpoint = _A
def _A (lowerCAmelCase__ :str ) -> List[Any]:
'''simple docstring'''
_a = {}
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' ) as f:
for index, line in enumerate(lowerCAmelCase__ ):
_a , _a = line.rstrip().split('\t' )
_a = index
return entity_vocab
# CLI entry point: parse conversion arguments and run the converter.
if __name__ == "__main__":
    # NOTE(review): obfuscation artifacts kept byte-identical — the parser is
    # bound to ``a_`` but later read as ``parser``/``args``, and
    # ``convert_luke_checkpoint`` is never defined under that name in this file.
    a_ : Optional[int] = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--checkpoint_path", type=str, help="Path to a pytorch_model.bin file.")
    parser.add_argument(
        "--metadata_path", default=None, type=str, help="Path to a metadata.json file, defining the configuration."
    )
    parser.add_argument(
        "--entity_vocab_path",
        default=None,
        type=str,
        help="Path to an entity_vocab.tsv file, containing the entity vocabulary.",
    )
    parser.add_argument(
        "--pytorch_dump_folder_path", default=None, type=str, help="Path to where to dump the output PyTorch model."
    )
    parser.add_argument(
        "--model_size", default="base", type=str, choices=["base", "large"], help="Size of the model to be converted."
    )
    a_ : List[Any] = parser.parse_args()
    convert_luke_checkpoint(
        args.checkpoint_path,
        args.metadata_path,
        args.entity_vocab_path,
        args.pytorch_dump_folder_path,
        args.model_size,
    )
# (stray dataset table-delimiter line — not part of the original source)
'''Lazy import structure for LayoutLMv3 (standard transformers __init__ pattern).'''
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_tokenizers_available,
    is_torch_available,
    is_vision_available,
)

# Always-importable part of the structure; optional backends extend it below.
# Fixed: the obfuscated original re-bound this variable in every branch
# (clobbering the dict) and finally passed an undefined ``_import_structure``
# to ``_LazyModule``.
a_ = {
    "configuration_layoutlmv3": [
        "LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "LayoutLMv3Config",
        "LayoutLMv3OnnxConfig",
    ],
    "processing_layoutlmv3": ["LayoutLMv3Processor"],
    "tokenization_layoutlmv3": ["LayoutLMv3Tokenizer"],
}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["tokenization_layoutlmv3_fast"] = ["LayoutLMv3TokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_layoutlmv3"] = [
        "LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LayoutLMv3ForQuestionAnswering",
        "LayoutLMv3ForSequenceClassification",
        "LayoutLMv3ForTokenClassification",
        "LayoutLMv3Model",
        "LayoutLMv3PreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["modeling_tf_layoutlmv3"] = [
        "TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST",
        "TFLayoutLMv3ForQuestionAnswering",
        "TFLayoutLMv3ForSequenceClassification",
        "TFLayoutLMv3ForTokenClassification",
        "TFLayoutLMv3Model",
        "TFLayoutLMv3PreTrainedModel",
    ]

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    a_["feature_extraction_layoutlmv3"] = ["LayoutLMv3FeatureExtractor"]
    a_["image_processing_layoutlmv3"] = ["LayoutLMv3ImageProcessor"]

if TYPE_CHECKING:
    from .configuration_layoutlmva import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMvaConfig,
        LayoutLMvaOnnxConfig,
    )
    from .processing_layoutlmva import LayoutLMvaProcessor
    from .tokenization_layoutlmva import LayoutLMvaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_layoutlmva_fast import LayoutLMvaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_layoutlmva import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMvaForQuestionAnswering,
            LayoutLMvaForSequenceClassification,
            LayoutLMvaForTokenClassification,
            LayoutLMvaModel,
            LayoutLMvaPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_layoutlmva import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMvaForQuestionAnswering,
            TFLayoutLMvaForSequenceClassification,
            TFLayoutLMvaForTokenClassification,
            TFLayoutLMvaModel,
            TFLayoutLMvaPreTrainedModel,
        )

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_layoutlmva import LayoutLMvaFeatureExtractor
        from .image_processing_layoutlmva import LayoutLMvaImageProcessor
else:
    import sys

    # Register the lazy module so attribute access triggers the real imports.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], a_, module_spec=__spec__)
| 168 | 1 |
from math import ceil
def solution(n: int = 1001) -> int:
    """Return the sum of the numbers on both diagonals of an n x n number
    spiral (Project Euler problem 28). `n` is expected to be odd.
    """
    total = 1  # the centre cell of the spiral
    for i in range(1, int(ceil(n / 2.0))):
        odd = 2 * i + 1  # side length of ring i
        even = 2 * i
        # The four corners of ring i sum to 4*odd**2 - 6*even.
        total = total + 4 * odd**2 - 6 * even
    return total
if __name__ == "__main__":
    import sys

    # With no CLI argument, solve the default 1001x1001 spiral;
    # otherwise parse the requested (odd) side length.
    if len(sys.argv) == 1:
        print(solution())
    else:
        try:
            n = int(sys.argv[1])
            print(solution(n))
        except ValueError:
            print('Invalid entry - please enter a number')
| 177 |
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
# Module-level logger; the feature extractor below calls `logger.warning`.
logger = logging.get_logger(__name__)
class EncodecFeatureExtractor(SequenceFeatureExtractor):
    r"""
    Constructs an EnCodec feature extractor: converts raw mono/stereo audio to
    `input_values` (channels-first float32 arrays), padding or truncating to a
    multiple of the model's chunk stride, and returns a `padding_mask` when
    padding was applied.
    """

    model_input_names = ["input_values", "padding_mask"]

    def __init__(
        self,
        feature_size: int = 1,
        sampling_rate: int = 24_000,
        padding_value: float = 0.0,
        chunk_length_s: float = None,
        overlap: float = None,
        **kwargs,
    ):
        """
        Args:
            feature_size: 1 for mono, 2 for stereo.
            sampling_rate: rate (Hz) the model expects.
            padding_value: value used to pad the audio.
            chunk_length_s: optional chunk length in seconds; when set together
                with `overlap`, inputs are padded/truncated to whole chunks.
            overlap: fractional overlap between consecutive chunks.
        """
        super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
        self.chunk_length_s = chunk_length_s
        self.overlap = overlap

    @property
    def chunk_length(self):
        # Chunk length in samples, or None when chunking is disabled.
        if self.chunk_length_s is None:
            return None
        else:
            return int(self.chunk_length_s * self.sampling_rate)

    @property
    def chunk_stride(self):
        # Stride in samples between chunk starts (at least 1), or None when
        # chunking is disabled.
        if self.chunk_length_s is None or self.overlap is None:
            return None
        else:
            return max(1, int((1.0 - self.overlap) * self.chunk_length))

    def __call__(
        self,
        raw_audio: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        padding: Optional[Union[bool, str, PaddingStrategy]] = None,
        truncation: Optional[bool] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        sampling_rate: Optional[int] = None,
    ):
        """
        Featurize one audio example or a batch of examples.

        Raises:
            ValueError: on sampling-rate mismatch, when both `padding` and
                `truncation` are set, or on a channel/shape mismatch.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
                    f' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
                    f' {self.sampling_rate} and not {sampling_rate}.'
                )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.'
            )

        if padding and truncation:
            raise ValueError('Both padding and truncation were set. Make sure you only set one.')
        elif padding is None:
            # by default let's pad the inputs
            padding = True

        is_batched = bool(
            isinstance(raw_audio, (list, tuple)) and (isinstance(raw_audio[0], (np.ndarray, tuple, list)))
        )

        # Normalize everything to float32 numpy, transposed to (length, channels).
        if is_batched:
            raw_audio = [np.asarray(audio, dtype=np.float32).T for audio in raw_audio]
        elif not is_batched and not isinstance(raw_audio, np.ndarray):
            raw_audio = np.asarray(raw_audio, dtype=np.float32)
        elif isinstance(raw_audio, np.ndarray) and raw_audio.dtype is np.dtype(np.float64):
            raw_audio = raw_audio.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_audio = [np.asarray(raw_audio).T]

        # verify inputs are valid
        for idx, example in enumerate(raw_audio):
            if example.ndim > 2:
                raise ValueError(f'Expected input shape (channels, length) but got shape {example.shape}')
            if self.feature_size == 1 and example.ndim != 1:
                raise ValueError(f'Expected mono audio but example has {example.shape[-1]} channels')
            if self.feature_size == 2 and example.shape[-1] != 2:
                raise ValueError(f'Expected stereo audio but example has {example.shape[-1]} channels')

        padded_inputs = None
        input_values = BatchFeature({'input_values': raw_audio})
        if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
            if truncation:
                # Truncate every example to the largest whole number of chunks
                # that fits in the shortest example.
                max_length = min(array.shape[0] for array in raw_audio)
                nb_step = int(np.floor(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
            elif padding:
                # Pad every example up to a whole number of chunks covering the
                # longest example.
                max_length = max(array.shape[0] for array in raw_audio)
                nb_step = int(np.ceil(max_length / self.chunk_stride))
                max_length = (nb_step - 1) * self.chunk_stride + self.chunk_length
                padding = 'max_length'
            else:
                padded_inputs = input_values

        # normal padding on batch
        if padded_inputs is None:
            padded_inputs = self.pad(
                input_values,
                max_length=max_length,
                truncation=truncation,
                padding=padding,
                return_attention_mask=padding,
            )
            if padding:
                # Rename to the model's expected input name.
                padded_inputs['padding_mask'] = padded_inputs.pop('attention_mask')

        input_values = []
        for example in padded_inputs.pop('input_values'):
            if self.feature_size == 1:
                example = example[..., None]
            # Back to channels-first (channels, length) for the model.
            input_values.append(example.T)
        padded_inputs['input_values'] = input_values

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
        return padded_inputs
| 177 | 1 |
from ...processing_utils import ProcessorMixin
class WhisperProcessor(ProcessorMixin):
    """
    Wraps a Whisper feature extractor and a Whisper tokenizer into a single
    processor: audio goes to the feature extractor, text to the tokenizer.
    """

    feature_extractor_class = "WhisperFeatureExtractor"
    tokenizer_class = "WhisperTokenizer"

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        # Default sub-processor used inside `as_target_processor` contexts.
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        """Forward to the tokenizer's decoder-prompt helper."""
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        """
        Process `audio` and/or `text`. When both are given, the tokenized text
        is attached to the audio features as `labels`.
        """
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop('audio', None)
        sampling_rate = kwargs.pop('sampling_rate', None)
        text = kwargs.pop('text', None)
        if len(args) > 0:
            # First positional argument is treated as the audio input.
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError('You need to specify either an `audio` or `text` input to process.')

        if audio is not None:
            inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif audio is None:
            return encodings
        else:
            inputs['labels'] = encodings['input_ids']
            return inputs

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    def get_prompt_ids(self, text, return_tensors="np"):
        """Forward to the tokenizer's `get_prompt_ids`."""
        return self.tokenizer.get_prompt_ids(text, return_tensors=return_tensors)
| 205 |
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
# Absolute tolerance used when comparing model outputs against golden values.
TOLERANCE = 1e-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class AutoformerModelTester:
    """Builds a tiny Autoformer config and random inputs for the tests below."""

    def __init__(
        self,
        parent,
        d_model=16,
        batch_size=13,
        prediction_length=7,
        context_length=14,
        label_length=10,
        cardinality=19,
        embedding_dimension=5,
        num_time_features=4,
        is_training=True,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        lags_sequence=[1, 2, 3, 4, 5],
        moving_average=25,
        autocorrelation_factor=5,
    ):
        self.d_model = d_model
        self.parent = parent
        self.batch_size = batch_size
        self.prediction_length = prediction_length
        self.context_length = context_length
        self.cardinality = cardinality
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.embedding_dimension = embedding_dimension
        self.is_training = is_training
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob

        # Derived sequence lengths used by the common attention-shape tests.
        self.encoder_seq_length = context_length
        self.decoder_seq_length = prediction_length + label_length
        self.label_length = label_length

        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor

    def get_config(self):
        """Return a small AutoformerConfig matching the tester's dimensions."""
        return AutoformerConfig(
            d_model=self.d_model, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, prediction_length=self.prediction_length, context_length=self.context_length, label_length=self.label_length, lags_sequence=self.lags_sequence, num_time_features=self.num_time_features, num_static_categorical_features=1, cardinality=[self.cardinality], embedding_dimension=[self.embedding_dimension], moving_average=self.moving_average,
        )

    def prepare_autoformer_inputs_dict(self, config):
        """Random past/future tensors shaped for the given config."""
        _past_length = config.context_length + max(config.lags_sequence)

        static_categorical_features = ids_tensor([self.batch_size, 1], config.cardinality[0])
        past_time_features = floats_tensor([self.batch_size, _past_length, config.num_time_features])
        past_values = floats_tensor([self.batch_size, _past_length])
        past_observed_mask = floats_tensor([self.batch_size, _past_length]) > 0.5

        # decoder inputs
        future_time_features = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features])
        future_values = floats_tensor([self.batch_size, config.prediction_length])

        inputs_dict = {
            'past_values': past_values,
            'static_categorical_features': static_categorical_features,
            'past_time_features': past_time_features,
            'past_observed_mask': past_observed_mask,
            'future_time_features': future_time_features,
            'future_values': future_values,
        }
        return inputs_dict

    def prepare_config_and_inputs(self):
        config = self.get_config()
        inputs_dict = self.prepare_autoformer_inputs_dict(config)
        return config, inputs_dict

    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict

    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        """Check that a round-tripped encoder/decoder reproduce the full model's
        hidden states (within 1e-3)."""
        model = AutoformerModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)

        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state

        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = AutoformerEncoder.from_pretrained(tmpdirname).to(torch_device)

        transformer_inputs, feature, _, _, _ = model.create_network_inputs(**inputs_dict)
        seasonal_input, trend_input = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...])

        enc_input = torch.cat(
            (transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]), dim=-1,
        )
        encoder_last_hidden_state_2 = encoder(inputs_embeds=enc_input)[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)

        mean = (
            torch.mean(transformer_inputs[:, : config.context_length, ...], dim=1)
            .unsqueeze(1)
            .repeat(1, config.prediction_length, 1)
        )
        zeros = torch.zeros(
            [transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]], device=enc_input.device,
        )

        dec_input = torch.cat(
            (
                torch.cat((seasonal_input[:, -config.label_length :, ...], zeros), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )
        trend_init = torch.cat(
            (
                torch.cat((trend_input[:, -config.label_length :, ...], mean), dim=1),
                feature[:, config.context_length - config.label_length :, ...],
            ), dim=-1,
        )

        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = AutoformerDecoder.from_pretrained(tmpdirname).to(torch_device)

        last_hidden_state_2 = decoder(
            trend=trend_init, inputs_embeds=dec_input, encoder_hidden_states=encoder_last_hidden_state,
        )[0]

        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class AutoformerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Common model tests specialised for Autoformer."""

    all_model_classes = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
    all_generative_model_classes = (AutoformerForPrediction,) if is_torch_available() else ()
    pipeline_model_mapping = {"feature-extraction": AutoformerModel} if is_torch_available() else {}
    # Features not supported by the time-series model family.
    test_pruning = False
    test_head_masking = False
    test_missing_keys = False
    test_torchscript = False
    test_inputs_embeds = False
    test_model_common_attributes = False

    def setUp(self):
        self.model_tester = AutoformerModelTester(self)
        self.config_tester = ConfigTester(self, config_class=AutoformerConfig, has_text_modality=False)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_save_load_strict(self):
        """Saving then reloading each model class must not lose any weights."""
        config, _ = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info['missing_keys'], [])

    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)

    @unittest.skip(reason='Model has no tokens embeddings')
    def test_resize_tokens_embeddings(self):
        pass

    def test_model_main_input_name(self):
        model_signature = inspect.signature(getattr(AutoformerModel, 'forward'))
        # The main input is the name of the argument after `self`
        observed_main_input_name = list(model_signature.parameters.keys())[1]
        self.assertEqual(AutoformerModel.main_input_name, observed_main_input_name)

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = [
                'past_values',
                'past_time_features',
                'past_observed_mask',
                'static_categorical_features',
                'static_real_features',
                'future_values',
                'future_time_features',
            ]

            if model.__class__.__name__ in ["AutoformerForPrediction"]:
                expected_arg_names.append('future_observed_mask')

            expected_arg_names.extend(
                [
                    'decoder_attention_mask',
                    'head_mask',
                    'decoder_head_mask',
                    'cross_attn_head_mask',
                    'encoder_outputs',
                    'past_key_values',
                    'output_hidden_states',
                    'output_attentions',
                    'use_cache',
                    'return_dict',
                ]
            )

            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

    def test_attention_outputs(self):
        """Check number and shape of the returned attention tensors."""
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        seq_len = getattr(self.model_tester, 'seq_length', None)
        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', seq_len)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', seq_len)
        d_model = getattr(self.model_tester, 'd_model', None)
        num_attention_heads = getattr(self.model_tester, 'num_attention_heads', None)
        dim = d_model // num_attention_heads

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.encoder_attentions
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)

            self.assertListEqual(
                list(attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )
            out_len = len(outputs)

            correct_outlen = 7

            if "last_hidden_state" in outputs:
                correct_outlen += 1
            if "trend" in outputs:
                correct_outlen += 1
            if "past_key_values" in outputs:
                correct_outlen += 1  # past_key_values have been returned
            if "loss" in outputs:
                correct_outlen += 1
            if "params" in outputs:
                correct_outlen += 1

            self.assertEqual(out_len, correct_outlen)

            # decoder attentions
            decoder_attentions = outputs.decoder_attentions
            self.assertIsInstance(decoder_attentions, (list, tuple))
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # cross attentions
            cross_attentions = outputs.cross_attentions
            self.assertIsInstance(cross_attentions, (list, tuple))
            self.assertEqual(len(cross_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(cross_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, decoder_seq_length, dim],
            )

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            self.assertEqual(out_len + 2, len(outputs))

            self_attentions = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions

            self.assertEqual(len(self_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(self_attentions[0].shape[-3:]), [self.model_tester.num_attention_heads, encoder_seq_length, dim],
            )

    @is_flaky()
    def test_retain_grad_hidden_states_attentions(self):
        super().test_retain_grad_hidden_states_attentions()
def prepare_batch(filename="train-batch.pt"):
    """Download a pre-serialized tourism-monthly batch from the Hub and load it
    onto the current test device."""
    file = hf_hub_download(repo_id='hf-internal-testing/tourism-monthly-batch', filename=filename, repo_type='dataset')
    batch = torch.load(file, map_location=torch_device)
    return batch
@require_torch
@slow
class AutoformerModelIntegrationTests(unittest.TestCase):
    """Slow integration tests against the published tourism-monthly checkpoint."""

    def test_inference_no_head(self):
        model = AutoformerModel.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch()

        with torch.no_grad():
            output = model(
                past_values=batch['past_values'], past_time_features=batch['past_time_features'], past_observed_mask=batch['past_observed_mask'], static_categorical_features=batch['static_categorical_features'], future_values=batch['future_values'], future_time_features=batch['future_time_features'],
            )[0]

        expected_shape = torch.Size(
            (64, model.config.prediction_length + model.config.label_length, model.config.feature_size)
        )
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[0.3593, -1.3398, 0.6330], [0.2279, 1.5396, -0.1792], [0.0450, 1.3225, -0.2335]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_inference_head(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            output = model(
                past_values=batch['past_values'], past_time_features=batch['past_time_features'], past_observed_mask=batch['past_observed_mask'], static_categorical_features=batch['static_categorical_features'],
            ).encoder_last_hidden_state
        expected_shape = torch.Size((64, model.config.context_length, model.config.d_model))
        self.assertEqual(output.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-0.0734, -0.9036, 0.8358], [4.7186, 2.4113, 1.9581], [1.7953, 2.3558, 1.2970]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[0, :3, :3], expected_slice, atol=1e-4))

    def test_seq_to_seq_generation(self):
        model = AutoformerForPrediction.from_pretrained('huggingface/autoformer-tourism-monthly').to(torch_device)
        batch = prepare_batch('val-batch.pt')
        with torch.no_grad():
            outputs = model.generate(
                static_categorical_features=batch['static_categorical_features'], past_time_features=batch['past_time_features'], past_values=batch['past_values'], future_time_features=batch['future_time_features'], past_observed_mask=batch['past_observed_mask'],
            )
        expected_shape = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length))
        self.assertEqual(outputs.sequences.shape, expected_shape)

        expected_slice = torch.tensor([3130.6763, 4056.5293, 7053.0786], device=torch_device)
        mean_prediction = outputs.sequences.mean(dim=1)
        # Sampled predictions vary; a loose relative tolerance is intentional.
        self.assertTrue(torch.allclose(mean_prediction[0, -3:], expected_slice, rtol=1e-1))
| 205 | 1 |
import argparse
import json
from tqdm import tqdm
def main():
    """Parse raw DPR training data into an evaluation-set file (one question
    per line) and a gold-data file (tab-joined positive context titles)."""
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--src_path", type=str, default="biencoder-nq-dev.json", help="Path to raw DPR training data",
    )
    parser.add_argument(
        "--evaluation_set", type=str, help="where to store parsed evaluation_set file",
    )
    parser.add_argument(
        "--gold_data_path", type=str, help="where to store parsed gold_data_path file",
    )
    args = parser.parse_args()

    with open(args.src_path, "r") as src_file, open(args.evaluation_set, "w") as eval_file, open(
        args.gold_data_path, "w") as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record["question"]
            contexts = [context["title"] for context in dpr_record["positive_ctxs"]]
            eval_file.write(question + "\n")
            gold_file.write("\t".join(contexts) + "\n")


if __name__ == "__main__":
    main()
| 180 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), "Tatoeba directory does not exist.")
class TatoebaConversionTester(unittest.TestCase):
    """Tests for the Marian Tatoeba checkpoint converter; requires a local
    Tatoeba-Challenge checkout at DEFAULT_REPO."""

    @cached_property
    def resolver(self):
        # Converter output goes into a throwaway directory.
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver(self):
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
# Largest per-device batch size; bigger requested sizes fall back to gradient
# accumulation in `training_function`.
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator, batch_size: int = 16):
    """
    Build train/eval dataloaders for GLUE MRPC tokenized with bert-base-cased.

    Args:
        accelerator: the `Accelerator` driving distributed setup.
        batch_size: per-device training batch size.
    Returns:
        (train_dataloader, eval_dataloader)
    """
    tokenizer = AutoTokenizer.from_pretrained('bert-base-cased')
    datasets = load_dataset('glue', 'mrpc')

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'], examples['sentence2'], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function, batched=True, remove_columns=['idx', 'sentence1', 'sentence2'],
        )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label', 'labels')

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None

        return tokenizer.pad(
            examples, padding='longest', max_length=max_length, pad_to_multiple_of=pad_to_multiple_of, return_tensors='pt',
        )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'], shuffle=True, collate_fn=collate_fn, batch_size=batch_size, drop_last=True
    )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'], shuffle=False, collate_fn=collate_fn, batch_size=EVAL_BATCH_SIZE, drop_last=(accelerator.mixed_precision == 'fp8'),
    )

    return train_dataloader, eval_dataloader
def training_function(config, args):
    """Train and evaluate BERT on MRPC under `accelerate` with the given
    hyper-parameter `config` and CLI `args`."""
    # Initialize accelerator
    accelerator = Accelerator(cpu=args.cpu, mixed_precision=args.mixed_precision)
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'])
    seed = int(config['seed'])
    batch_size = int(config['batch_size'])

    metric = evaluate.load('glue', 'mrpc')

    # If the batch size is too big we use gradient accumulation
    gradient_accumulation_steps = 1
    if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
        gradient_accumulation_steps = batch_size // MAX_GPU_BATCH_SIZE
        batch_size = MAX_GPU_BATCH_SIZE

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained('bert-base-cased', return_dict=True)

    # We could avoid this line since the accelerator is set with `device_placement=True` (default value).
    # Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
    # creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
    model = model.to(accelerator.device)

    # Instantiate optimizer
    optimizer = AdamW(params=model.parameters(), lr=lr)

    # Instantiate scheduler
    lr_scheduler = get_linear_schedule_with_warmup(
        optimizer=optimizer, num_warmup_steps=100, num_training_steps=(len(train_dataloader) * num_epochs) // gradient_accumulation_steps,
    )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # Now we train the model
    for epoch in range(num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

        model.eval()
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            predictions, references = accelerator.gather_for_metrics((predictions, batch['labels']))
            metric.add_batch(
                predictions=predictions, references=references,
            )

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f"""epoch {epoch}:""", eval_metric)
def main():
    """CLI entry point: parse args and launch `training_function`."""
    parser = argparse.ArgumentParser(description='Simple example of training script.')
    parser.add_argument(
        '--mixed_precision', type=str, default=None, choices=['no', 'fp16', 'bf16', 'fp8'], help='Whether to use mixed precision. Choose'
        'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
        'and an Nvidia Ampere GPU.', )
    parser.add_argument('--cpu', action='store_true', help='If passed, will train on the CPU.')
    args = parser.parse_args()
    config = {'lr': 2E-5, 'num_epochs': 3, 'seed': 42, 'batch_size': 16}
    training_function(config, args)


if __name__ == "__main__":
    main()
| 284 |
def a_(number: int) -> bool:
    """Return True if `number` is a power of two (0 is treated as True, matching
    the bit-trick `n & (n - 1) == 0`).

    Raises:
        ValueError: if `number` is negative.
    """
    if number < 0:
        raise ValueError('number must not be negative')
    # A power of two has a single set bit, so n & (n - 1) clears it to zero.
    return number & (number - 1) == 0


if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 284 | 1 |
"""simple docstring"""
from sklearn.metrics import recall_score
import datasets
# Renamed from a throwaway identifier: the ``add_start_docstrings`` decorator on
# the Metric class below references ``_DESCRIPTION``.
_DESCRIPTION = '''
Recall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:
Recall = TP / (TP + FN)
Where TP is the true positives and FN is the false negatives.
'''
# Renamed from a throwaway identifier: the ``add_start_docstrings`` decorator and
# ``_info`` on the Metric class below reference ``_KWARGS_DESCRIPTION``.
_KWARGS_DESCRIPTION = '''
Args:
- **predictions** (`list` of `int`): The predicted labels.
- **references** (`list` of `int`): The ground truth labels.
- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.
- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.
- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.
- `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.
- `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.
- `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.
- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .
- `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.
- `0`: If there is a zero division, the return value is `0`.
- `1`: If there is a zero division, the return value is `1`.
Returns:
- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. Therefore, a higher recall is generally considered better.
Examples:
Example 1-A simple example with some errors
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])
>>> print(results)
{\'recall\': 0.6666666666666666}
Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)
>>> print(results)
{\'recall\': 0.5}
Example 3-The same example as Example 1, but with `sample_weight` included.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]
>>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)
>>> print(results)
{\'recall\': 0.55}
Example 4-A multiclass example, using different averages.
>>> recall_metric = datasets.load_metric(\'recall\')
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')
>>> print(results)
{\'recall\': 0.3333333333333333}
>>> results = recall_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'recall\': array([1., 0., 0.])}
'''
# Renamed from a throwaway identifier: ``_info`` on the Metric class below
# references ``_CITATION``.
_CITATION = '''
@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class __lowerCamelCase ( datasets.Metric ):
    """Recall metric backed by ``sklearn.metrics.recall_score``.

    NOTE(review): the garbled source gave both methods the same name (so the
    second clobbered the first) and duplicated parameter names; ``_info`` and
    ``_compute`` are the hooks the ``datasets.Metric`` base class invokes.
    """

    def _info(self) -> datasets.MetricInfo:
        # The "multilabel" config takes sequences of ints per example; every
        # other config takes scalar int labels.
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    '''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
                    '''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
                }
                if self.config_name == '''multilabel'''
                else {
                    '''predictions''': datasets.Value('''int32''' ),
                    '''references''': datasets.Value('''int32''' ),
                }
            ),
            reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''],
        )

    def _compute(
        self,
        predictions,
        references,
        labels=None,
        pos_label=1,
        average="binary",
        sample_weight=None,
        zero_division="warn",
    ):
        """Compute recall; see the module-level docstring for argument semantics."""
        # sklearn's signature is (y_true, y_pred), so references come first.
        score = recall_score(
            references,
            predictions,
            labels=labels,
            pos_label=pos_label,
            average=average,
            sample_weight=sample_weight,
            zero_division=zero_division,
        )
        # ``average=None`` yields a per-class array; a scalar otherwise.
        return {"recall": float(score) if score.size == 1 else score}
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
# Map of submodule name -> public names it defines.  Consumed by _LazyModule
# below so heavy backends (torch / tf / flax) are imported only on first use.
# NOTE(review): the garbled source reassigned one variable per backend branch
# (clobbering the dict) and then passed an undefined ``_import_structure`` to
# _LazyModule; this restores the conventional pattern the final call expects.
_import_structure = {
    'configuration_resnet': ['RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ResNetConfig', 'ResNetOnnxConfig']
}

# Register the PyTorch models only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_resnet'] = [
        'RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ResNetForImageClassification',
        'ResNetModel',
        'ResNetPreTrainedModel',
        'ResNetBackbone',
    ]

# Register the TensorFlow models only when TF is installed.
try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_tf_resnet'] = [
        'TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST',
        'TFResNetForImageClassification',
        'TFResNetModel',
        'TFResNetPreTrainedModel',
    ]

# Register the Flax models only when Flax is installed.
try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['modeling_flax_resnet'] = [
        'FlaxResNetForImageClassification',
        'FlaxResNetModel',
        'FlaxResNetPreTrainedModel',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real (eager) imports.
    from .configuration_resnet import RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP, ResNetConfig, ResNetOnnxConfig

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_resnet import (
            RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            ResNetBackbone,
            ResNetForImageClassification,
            ResNetModel,
            ResNetPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_resnet import (
            TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFResNetForImageClassification,
            TFResNetModel,
            TFResNetPreTrainedModel,
        )

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_resnet import FlaxResNetForImageClassification, FlaxResNetModel, FlaxResNetPreTrainedModel
else:
    import sys

    # Replace this module with a lazy proxy so submodules load on demand.
    # NOTE(review): the original assigned the _LazyModule to a throwaway name,
    # which has no effect; registering it in sys.modules is the pattern this
    # helper exists for — confirm against the project's other __init__ files.
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowerCamelCase_ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for ``KandinskyVaaControlnetPipeline`` built from tiny dummy components.

    NOTE(review): the garbled source used an undefined base ``_SCREAMING_SNAKE_CASE``
    (``PipelineTesterMixin`` is the only unused import matching it), gave every
    property/method the same name, and duplicated parameter names.  Names were
    reconstructed from the internal ``self.*`` references; attribute names that
    follow only the PipelineTesterMixin convention are flagged below.
    """

    # ``self.pipeline_class`` is referenced by the test method below.
    pipeline_class = KandinskyVaaControlnetPipeline
    # NOTE(review): ``params``/``batch_params``/``required_optional_params``/
    # ``test_xformers_attention`` follow the tester-mixin convention — confirm.
    params = ["image_embeds", "negative_image_embeds", "hint"]
    batch_params = ["image_embeds", "negative_image_embeds", "hint"]
    required_optional_params = [
        "generator",
        "height",
        "width",
        "latents",
        "guidance_scale",
        "num_inference_steps",
        "return_dict",
        "guidance_scale",
        "num_images_per_prompt",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        # Referenced by dummy_unet and get_dummy_inputs.
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_a(self):
        # Referenced by dummy_unet's 'block_out_channels'.
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_unet(self):
        torch.manual_seed(0 )
        model_kwargs = {
            'in_channels': 8,
            # Out channels is double in channels because predicts mean and variance
            'out_channels': 8,
            'addition_embed_type': 'image_hint',
            'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
            'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
            'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
            'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
            'layers_per_block': 1,
            'encoder_hid_dim': self.text_embedder_hidden_size,
            'encoder_hid_dim_type': 'image_proj',
            'cross_attention_dim': self.cross_attention_dim,
            'attention_head_dim': 4,
            'resnet_time_scale_shift': 'scale_shift',
            'class_embed_type': None,
        }
        return UNetaDConditionModel(**model_kwargs )

    @property
    def dummy_movq_kwargs(self):
        return {
            "block_out_channels": [32, 32, 64, 64],
            "down_block_types": [
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "DownEncoderBlock2D",
                "AttnDownEncoderBlock2D",
            ],
            "in_channels": 3,
            "latent_channels": 4,
            "layers_per_block": 1,
            "norm_num_groups": 8,
            "norm_type": "spatial",
            "num_vq_embeddings": 12,
            "out_channels": 3,
            "up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            "vq_embed_dim": 4,
        }

    @property
    def dummy_movq(self):
        torch.manual_seed(0 )
        return VQModel(**self.dummy_movq_kwargs )

    def get_dummy_components(self):
        """Build the tiny unet/scheduler/movq trio the pipeline needs."""
        unet = self.dummy_unet
        movq = self.dummy_movq
        # NOTE(review): the boolean scheduler kwargs were garbled to an
        # undefined name; False matches the fp-test configuration elsewhere.
        scheduler = DDIMScheduler(
            num_train_timesteps=1_000,
            beta_schedule='linear',
            beta_start=0.0_0085,
            beta_end=0.012,
            clip_sample=False,
            set_alpha_to_one=False,
            steps_offset=1,
            prediction_type='epsilon',
            thresholding=False,
        )
        return {
            'unet': unet,
            'scheduler': scheduler,
            'movq': movq,
        }

    def get_dummy_inputs(self, device, seed=0):
        """Deterministic pipeline kwargs for a given target device and seed."""
        image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed ) ).to(device )
        negative_image_embeds = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
            device )
        # create hint
        hint = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        # MPS has no per-device Generator support; fall back to the global seed.
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        return {
            'image_embeds': image_embeds,
            'negative_image_embeds': negative_image_embeds,
            'hint': hint,
            'generator': generator,
            'height': 64,
            'width': 64,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }

    def test_kandinsky_controlnet(self):
        """Run the dummy pipeline on CPU and pin a 3x3 output-pixel slice."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images
        # return_dict=False yields a plain tuple whose first item is the images.
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ) , return_dict=False , )[0]
        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array(
            [0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
        assert (
            np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
        assert (
            np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
        ), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Slow GPU integration test for the Kandinsky 2.2 controlnet pipeline.

    NOTE(review): both methods in the garbled source shared one name; the first
    calls ``super().tearDown()`` so it must be ``tearDown``, and the second is
    the actual test (unittest only collects ``test_*`` methods).
    """

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_kandinsky_controlnet(self):
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy' )
        hint = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/kandinskyv22/hint_image_cat.png' )
        # Convert the PIL hint to a (1, C, H, W) float tensor in [0, 1].
        hint = torch.from_numpy(np.array(hint ) ).float() / 255.0
        hint = hint.permute(2 , 0 , 1 ).unsqueeze(0 )
        # NOTE(review): ``torch.floataa`` does not exist; the reference file is
        # an fp16 artifact, so float16 is the intended dtype.
        pipe_prior = KandinskyVaaPriorPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.float16 )
        pipe_prior.to(torch_device )
        pipeline = KandinskyVaaControlnetPipeline.from_pretrained(
            'kandinsky-community/kandinsky-2-2-controlnet-depth' , torch_dtype=torch.float16 )
        pipeline = pipeline.to(torch_device )
        pipeline.set_progress_bar_config(disable=None )
        prompt = 'A robot, 4k photo'
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        image_emb , zero_image_emb = pipe_prior(
            prompt , generator=generator , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
        # Re-seed so the denoising pass is reproducible independently of the prior.
        generator = torch.Generator(device='cuda' ).manual_seed(0 )
        output = pipeline(
            image_embeds=image_emb , negative_image_embeds=zero_image_emb , hint=hint , generator=generator , num_inference_steps=100 , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert_mean_pixel_difference(image , expected_image )
import unittest
import numpy as np
from diffusers import OnnxStableDiffusionInpaintPipelineLegacy
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
load_numpy,
nightly,
require_onnxruntime,
require_torch_gpu,
)
if is_onnx_available():
import onnxruntime as ort
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCamelCase_ ( unittest.TestCase ):
    """Nightly GPU test for ``OnnxStableDiffusionInpaintPipelineLegacy``.

    NOTE(review): the garbled source gave all three methods one name; the two
    properties must be ``gpu_provider`` and ``gpu_options`` because the test
    method reads ``self.gpu_provider`` / ``self.gpu_options``.
    """

    @property
    def gpu_provider(self):
        # ONNX Runtime CUDA execution provider with a capped memory arena.
        return (
            "CUDAExecutionProvider",
            {
                "gpu_mem_limit": "15000000000", # 15GB
                "arena_extend_strategy": "kSameAsRequested",
            },
        )

    @property
    def gpu_options(self):
        options = ort.SessionOptions()
        # NOTE(review): the garbled line only assigned ``False`` to a throwaway
        # name; disabling the memory-pattern optimization is the plausible
        # intent — confirm against other ONNX tests in this suite.
        options.enable_mem_pattern = False
        return options

    def test_inference_inpaint_legacy(self):
        init_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo.png' )
        mask_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/overture-creations-5sI6fQgYIuo_mask.png' )
        expected_image = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/in_paint/red_cat_sitting_on_a_park_bench_onnx.npy' )
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionInpaintPipelineLegacy.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , revision='onnx' , safety_checker=None , feature_extractor=None , provider=self.gpu_provider , sess_options=self.gpu_options , )
        pipe.set_progress_bar_config(disable=None )
        prompt = 'A red cat sitting on a park bench'
        generator = np.random.RandomState(0 )
        output = pipe(
            prompt=prompt , image=init_image , mask_image=mask_image , strength=0.75 , guidance_scale=7.5 , num_inference_steps=15 , generator=generator , output_type='np' , )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image ).max() < 1E-2
# NOTE(review): this is the package version string at the top of a
# diffusers-style package ``__init__``; such a value is conventionally exposed
# as ``__version__`` — confirm the intended public name.
UpperCamelCase_ = '''0.18.2'''
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor
| 361 |
from sklearn.metrics import matthews_corrcoef
import datasets
# Renamed from a single reused throwaway identifier (which clobbered itself
# twice): the ``add_start_docstrings`` decorator and ``_info`` on the Metric
# class below reference ``_DESCRIPTION``, ``_KWARGS_DESCRIPTION`` and
# ``_CITATION``.
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _snake_case ( datasets.Metric ):
    """Matthews correlation coefficient metric backed by
    ``sklearn.metrics.matthews_corrcoef``.

    NOTE(review): the garbled source gave both methods the same name and
    duplicated parameter names; ``_info`` and ``_compute`` are the hooks the
    ``datasets.Metric`` base class invokes.
    """

    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    """predictions""": datasets.Value("""int32""" ),
                    """references""": datasets.Value("""int32""" ),
                }
            ),
            reference_urls=[
                """https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html"""
            ],
        )

    def _compute(self, predictions, references, sample_weight=None):
        """Return the MCC of ``predictions`` against ``references``."""
        # sklearn's signature is (y_true, y_pred), so references come first.
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight ) ),
        }
| 59 | 0 |
from __future__ import annotations
import random
import unittest
from transformers import TransfoXLConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLModel,
)
class _lowercase :
def __init__( self : List[Any] , snake_case : Optional[Any] , ) -> Optional[int]:
"""simple docstring"""
UpperCamelCase_ : List[str] = parent
UpperCamelCase_ : str = 1_3
UpperCamelCase_ : List[Any] = 7
UpperCamelCase_ : Union[str, Any] = 3_0
UpperCamelCase_ : Union[str, Any] = self.seq_length + self.mem_len
UpperCamelCase_ : Union[str, Any] = 1_5
UpperCamelCase_ : int = True
UpperCamelCase_ : List[str] = True
UpperCamelCase_ : str = 9_9
UpperCamelCase_ : Union[str, Any] = [1_0, 5_0, 8_0]
UpperCamelCase_ : List[str] = 3_2
UpperCamelCase_ : Any = 3_2
UpperCamelCase_ : Optional[int] = 4
UpperCamelCase_ : Any = 8
UpperCamelCase_ : List[str] = 1_2_8
UpperCamelCase_ : Tuple = 2
UpperCamelCase_ : Any = 2
UpperCamelCase_ : Optional[Any] = None
UpperCamelCase_ : Dict = 1
UpperCamelCase_ : Union[str, Any] = 0
UpperCamelCase_ : Optional[int] = 3
UpperCamelCase_ : Union[str, Any] = self.vocab_size - 1
UpperCamelCase_ : int = 0.01
def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Any:
"""simple docstring"""
UpperCamelCase_ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Optional[int] = None
if self.use_labels:
UpperCamelCase_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCamelCase_ : Tuple = TransfoXLConfig(
vocab_size=self.vocab_size , mem_len=self.mem_len , clamp_len=self.clamp_len , cutoffs=self.cutoffs , d_model=self.hidden_size , d_embed=self.d_embed , n_head=self.num_attention_heads , d_head=self.d_head , d_inner=self.d_inner , div_val=self.div_val , n_layer=self.num_hidden_layers , eos_token_id=self.eos_token_id , pad_token_id=self.vocab_size - 1 , init_range=self.init_range , num_labels=self.num_labels , )
return (config, input_ids_a, input_ids_a, lm_labels)
def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> Tuple:
"""simple docstring"""
random.seed(self.seed )
tf.random.set_seed(self.seed )
def SCREAMING_SNAKE_CASE__ ( self : List[Any] , snake_case : Union[str, Any] , snake_case : Any , snake_case : Dict , snake_case : Any ) -> Tuple:
"""simple docstring"""
UpperCamelCase_ : int = TFTransfoXLModel(snake_case )
UpperCamelCase_, UpperCamelCase_ : Union[str, Any] = model(snake_case ).to_tuple()
UpperCamelCase_ : List[str] = {'input_ids': input_ids_a, 'mems': mems_a}
UpperCamelCase_, UpperCamelCase_ : str = model(snake_case ).to_tuple()
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(hidden_states_a.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
self.parent.assertListEqual(
[mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Union[str, Any] , snake_case : int ) -> List[str]:
        """Run TFTransfoXLLMHeadModel with/without mems and labels; check logits and mem shapes."""
        # NOTE(review): duplicate ``snake_case`` parameter names and unbound
        # references (``model``, ``input_ids_a``, ``mems_a``, ``lm_labels``,
        # ``lm_logits_a``) — original names were mangled away; verify.
        UpperCamelCase_ : Tuple = TFTransfoXLLMHeadModel(snake_case )
        UpperCamelCase_, UpperCamelCase_ : List[str] = model(snake_case ).to_tuple()
        UpperCamelCase_ : int = {'input_ids': input_ids_a, 'labels': lm_labels}
        UpperCamelCase_, UpperCamelCase_ : Dict = model(snake_case ).to_tuple()
        # also exercise the positional [input_ids, mems] calling convention
        UpperCamelCase_, UpperCamelCase_ : Tuple = model([input_ids_a, mems_a] ).to_tuple()
        UpperCamelCase_ : Optional[Any] = {'input_ids': input_ids_a, 'mems': mems_a, 'labels': lm_labels}
        UpperCamelCase_, UpperCamelCase_ : Optional[int] = model(snake_case ).to_tuple()
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
        self.parent.assertEqual(lm_logits_a.shape , (self.batch_size, self.seq_length, self.vocab_size) )
        self.parent.assertListEqual(
            [mem.shape for mem in mems_a] , [(self.mem_len, self.batch_size, self.hidden_size)] * self.num_hidden_layers , )
    def SCREAMING_SNAKE_CASE__ ( self : str , snake_case : List[Any] , snake_case : Optional[Any] , snake_case : Tuple , snake_case : Tuple ) -> List[Any]:
        """Run TFTransfoXLForSequenceClassification and check the logits shape."""
        # NOTE(review): duplicate ``snake_case`` parameter names; ``model`` and
        # ``result`` are referenced but never bound (mangled names) — verify.
        UpperCamelCase_ : str = TFTransfoXLForSequenceClassification(snake_case )
        UpperCamelCase_ : List[str] = model(snake_case )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Union[str, Any]:
        """Return ``(config, inputs_dict)`` for the shared model-test harness."""
        # NOTE(review): ``config_and_inputs``, ``input_ids_a``, ``config`` and
        # ``inputs_dict`` are referenced but the locals above are all bound to
        # ``UpperCamelCase_`` — original names were lost; verify.
        UpperCamelCase_ : int = self.prepare_config_and_inputs()
        ((UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_), (UpperCamelCase_)) : Union[str, Any] = config_and_inputs
        UpperCamelCase_ : Union[str, Any] = {'input_ids': input_ids_a}
        return config, inputs_dict
@require_tf
class _lowercase ( snake_case_ , snake_case_ , unittest.TestCase ):
    """TF Transfo-XL model test suite (model / LM head / sequence classification).

    NOTE(review): every class attribute below is bound to the same name
    ``lowercase`` (later assignments shadow earlier ones) and every method is
    named ``SCREAMING_SNAKE_CASE__`` (only the last survives at class-creation
    time) — the original attribute and method names were mangled away; verify.
    """
    lowercase = (
        (TFTransfoXLModel, TFTransfoXLLMHeadModel, TFTransfoXLForSequenceClassification) if is_tf_available() else ()
    )
    lowercase = () if is_tf_available() else ()
    lowercase = (
        {
            'feature-extraction': TFTransfoXLModel,
            'text-classification': TFTransfoXLForSequenceClassification,
            'text-generation': TFTransfoXLLMHeadModel,
            'zero-shot': TFTransfoXLForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # TODO: add this test when TFTransfoXLLMHead has a linear output layer implemented
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , snake_case : Optional[int] , snake_case : int , snake_case : int , snake_case : Any , snake_case : str ) -> int:
        """Return True for pipeline test cases this model cannot run.

        NOTE(review): duplicate ``snake_case`` parameter names (a SyntaxError as
        written); ``pipeline_test_casse_name`` is referenced but never bound —
        it was presumably one of the parameters originally; verify.
        """
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            # Get `ValueError: AttributeError: 'NoneType' object has no attribute 'new_ones'` or `AssertionError`.
            # `TransfoXLConfig` was never used in pipeline tests: cannot create a simple
            # tokenizer.
            return True
        return False
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
        """Set up the model tester and the config tester."""
        UpperCamelCase_ : int = TFTransfoXLModelTester(self )
        UpperCamelCase_ : Optional[Any] = ConfigTester(self , config_class=snake_case , d_embed=3_7 )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] ) -> List[Any]:
        """Run the shared config sanity tests."""
        self.config_tester.run_common_tests()
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
        """Test the base TFTransfoXLModel forward pass."""
        self.model_tester.set_seed()
        UpperCamelCase_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_model(*snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : str ) -> Union[str, Any]:
        """Test the LM-head variant."""
        self.model_tester.set_seed()
        UpperCamelCase_ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_lm_head(*snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : Optional[int] ) -> Union[str, Any]:
        """Test the sequence-classification variant."""
        UpperCamelCase_ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_transfo_xl_for_sequence_classification(*snake_case )
    def SCREAMING_SNAKE_CASE__ ( self : Any ) -> Dict:
        """Check input/output embedding accessors for each model class.

        NOTE(review): ``list_other_models_with_output_ebd``, ``name`` and ``x``
        are referenced but never bound under those names — mangled; verify.
        """
        UpperCamelCase_, UpperCamelCase_ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
        UpperCamelCase_ : Dict = [TFTransfoXLForSequenceClassification]
        for model_class in self.all_model_classes:
            UpperCamelCase_ : str = model_class(snake_case )
            assert isinstance(model.get_input_embeddings() , tf.keras.layers.Layer )
            if model_class in list_other_models_with_output_ebd:
                UpperCamelCase_ : Any = model.get_output_embeddings()
                assert isinstance(snake_case , tf.keras.layers.Layer )
                UpperCamelCase_ : Any = model.get_bias()
                assert name is None
            else:
                UpperCamelCase_ : Optional[int] = model.get_output_embeddings()
                assert x is None
                UpperCamelCase_ : str = model.get_bias()
                assert name is None
    def SCREAMING_SNAKE_CASE__ ( self : Union[str, Any] ) -> Any:
        """Intentionally skipped for this model family."""
        pass
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : List[str] ) -> List[str]:
        """Smoke-test loading the first pretrained checkpoint."""
        for model_name in TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            UpperCamelCase_ : List[str] = TFTransfoXLModel.from_pretrained(snake_case )
            self.assertIsNotNone(snake_case )
    @unittest.skip(reason='This model doesn\'t play well with fit() due to not returning a single loss.' )
    def SCREAMING_SNAKE_CASE__ ( self : List[Any] ) -> str:
        """Skipped: keras fit() is incompatible with this model's outputs."""
        pass
@require_tf
class _lowercase ( unittest.TestCase ):
    """Integration test: greedy generation with the pretrained transfo-xl-wt103 checkpoint."""
    @unittest.skip('Skip test until #12651 is resolved.' )
    @slow
    def SCREAMING_SNAKE_CASE__ ( self : int ) -> Any:
        """Generate 200 tokens from a fixed prompt and compare against the expected ids."""
        UpperCamelCase_ : Optional[int] = TFTransfoXLLMHeadModel.from_pretrained('transfo-xl-wt103' )
        # fmt: off
        UpperCamelCase_ : Tuple = tf.convert_to_tensor([[3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0]] , dtype=tf.intaa )  # noqa: E231
        # fmt: on
        # In 1991 , the remains of Russian Tsar Nicholas II and his family
        # ( except for Alexei and Maria ) are discovered .
        # The voice of Nicholas's young son , Tsarevich Alexei Nikolaevich , narrates the
        # remainder of the story . 1883 Western Siberia ,
        # a young Grigori Rasputin is asked by his father and a group of men to perform magic .
        # Rasputin has a vision and denounces one of the men as a horse thief . Although his
        # father initially slaps him for making such an accusation , Rasputin watches as the
        # man is chased outside and beaten . Twenty years later , Rasputin sees a vision of
        # the Virgin Mary , prompting him to become a priest . Rasputin quickly becomes famous ,
        # with people , even a bishop , begging for his blessing . <eod> </s> <eos>
        # fmt: off
        UpperCamelCase_ : Optional[Any] = [3_3,1_2_9_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_2,1_7_0_6,1_7,2_0_0_9_8,5,3_2_1_5,2_1,3_7,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,6_2_2_4,8_3_1,1_6_0_0_2,2,8,6_0_3,7_8_9_6_7,2_9_5_4_6,2_3,8_0_3,2_0,2_5,4_1_6,5,8,2_3_2,4,2_7_7,6,1_8_5_5,4_6_0_1,3,2_9_5_4_6,5_4,8,3_6_0_9,5,5_7_2_1_1,4_9,4,1,2_7_7,1_8,8,1_7_5_5,1_5_6_9_1,3,3_4_1,2_5,4_1_6,6_9_3,4_2_5_7_3,7_1,1_7,4_0_1,9_4,3_1,1_7_9_1_9,2,2_9_5_4_6,7_8_7_3,1_8,1,4_3_5,2_3,1_1_0_1_1,7_5_5,5,5_1_6_7,3,7_9_8_3,9_8,8_4,2,2_9_5_4_6,3_2_6_7,8,3_6_0_9,4,1,4_8_6_5,1_0_7_5,2,6_0_8_7,7_1,6,3_4_6,8,5_8_5_4,3,2_9_5_4_6,8_2_4,1_4_0_0,1_8_6_8,2,1_9,1_6_0,2,3_1_1,8,5_4_9_6,2,2_0_9_2_0,1_7,2_5,1_5_0_9_7,3,2_4,2_4,0,3_3,1,1_8_5_7,2,1,1_0_0_9,4,1_1_0_9,1_1_7_3_9,4_7_6_2,3_5_8,5,2_5,2_4_5,2_8,1_1_1_0,3,1_3,1_0_4_1,4,2_4,6_0_3,4_9_0,2,7_1_4_7_7,2_0_0_9_8,1_0_4_4_4_7,2,2_0_9_6_1,1,2_6_0_4,4,1,3_2_9,3,0]  # noqa: E231
        # fmt: on
        # In 1991, the remains of Russian Tsar Nicholas II and his family (
        # except for Alexei and Maria ) are discovered. The voice of young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.
        # 1883 Western Siberia, a young Grigori Rasputin is asked by his father
        # and a group of men to perform magic. Rasputin has a vision and
        # denounces one of the men as a horse thief. Although his father initially
        # slaps him for making such an accusation, Rasputin watches as the man
        # is chased outside and beaten. Twenty years later, Rasputin sees a vision
        # of the Virgin Mary, prompting him to become a priest.
        # Rasputin quickly becomes famous, with people, even a bishop, begging for
        # his blessing. <unk> <unk> <eos> In the 1990s, the remains of Russian Tsar
        # Nicholas II and his family were discovered. The voice of <unk> young son,
        # Tsarevich Alexei Nikolaevich, narrates the remainder of the story.<eos>
        # NOTE(review): ``output_ids`` below is never bound — the generate()
        # result is assigned to the mangled ``UpperCamelCase_`` name; verify.
        UpperCamelCase_ : List[Any] = model.generate(snake_case , max_length=2_0_0 , do_sample=snake_case )
        self.assertListEqual(output_ids[0].numpy().tolist() , snake_case )
import builtins
import sys
from ...utils.imports import _is_package_available
from . import cursor, input
from .helpers import Direction, clear_line, forceWrite, linebreak, move_cursor, reset_cursor, writeColor
from .keymap import KEYMAP
# Whether the menu is running inside Google Colab; the interactive menu falls
# back to plain numeric input there because raw keyboard handling is
# unavailable.  (Fix: this flag is read below as ``in_colab``, but the
# original assigned it to the throwaway name ``a_``.)
in_colab = False
try:
    in_colab = _is_package_available('google.colab')
except ModuleNotFoundError:
    pass
@input.register
class _lowercase:
    """A terminal bullet-point menu: navigate with arrow/number keys, confirm with enter.

    Fixes over the previous revision: ``__init__`` and several methods had
    duplicate parameter names (a SyntaxError); all handler methods shared one
    name so they shadowed each other; locals were assigned to throwaway names
    while the code read the real ones; and the digit-key decorator indexed
    ``KEYMAP`` with an undefined name instead of the loop variable.
    """

    def __init__(self, prompt: str = None, choices: list = None):
        """Store the prompt and choices; pick the arrow glyph per platform.

        ``choices`` defaults to an empty list (avoids the mutable-default trap).
        """
        self.position = 0
        self.choices = [] if choices is None else choices
        self.prompt = prompt
        if sys.platform == "win32":
            self.arrow_char = '*'
        else:
            self.arrow_char = '➔ '

    def write_choice(self, index: int, end: str = ""):
        """Write choice ``index`` (green on non-Windows terminals)."""
        if sys.platform != "win32":
            writeColor(self.choices[index], 3_2, end)
        else:
            forceWrite(self.choices[index], end)

    def print_choice(self, index: int):
        """Render one menu row, prefixing the arrow if it is the current row."""
        if index == self.position:
            forceWrite(f" {self.arrow_char} ")
            self.write_choice(index)
        else:
            # pad so unselected rows line up with the arrow-prefixed one
            forceWrite(f"    {self.choices[index]}")
        reset_cursor()

    def move_direction(self, direction: Direction, num_spaces: int = 1):
        """Move the cursor ``num_spaces`` rows up or down, redrawing both rows."""
        old_position = self.position
        if direction == Direction.DOWN:
            if self.position + 1 >= len(self.choices):
                return  # already at the bottom
            self.position += num_spaces
        else:
            if self.position - 1 < 0:
                return  # already at the top
            self.position -= num_spaces
        clear_line()
        self.print_choice(old_position)
        move_cursor(num_spaces, direction.name)
        self.print_choice(self.position)

    @input.mark(KEYMAP['up'])
    def move_up(self):
        """Arrow-up handler."""
        self.move_direction(Direction.UP)

    @input.mark(KEYMAP['down'])
    def move_down(self):
        """Arrow-down handler."""
        self.move_direction(Direction.DOWN)

    @input.mark(KEYMAP['newline'])
    def select(self):
        """Enter handler: move past the menu and return the selected index."""
        move_cursor(len(self.choices) - self.position, 'DOWN')
        return self.position

    @input.mark(KEYMAP['interrupt'])
    def interrupt(self):
        """Ctrl-C handler: restore the cursor position, then propagate."""
        move_cursor(len(self.choices) - self.position, 'DOWN')
        raise KeyboardInterrupt

    @input.mark_multiple(*[KEYMAP[str(number)] for number in range(1_0)])
    def select_row(self):
        """Digit-key handler: jump directly to the typed row if it exists."""
        index = int(chr(self.current_selection))
        movement = index - self.position
        if index == self.position:
            return
        if index < len(self.choices):
            if self.position > index:
                self.move_direction(Direction.UP, -movement)
            elif self.position < index:
                self.move_direction(Direction.DOWN, movement)
            else:
                return
        else:
            return

    def run(self, default_choice: int = 0):
        """Draw the menu and block until the user selects a choice.

        Returns the selected index.  In Colab, falls back to reading an index
        from stdin; invalid input silently selects ``default_choice``.
        """
        if self.prompt:
            linebreak()
            forceWrite(self.prompt, '\n')
            if in_colab:
                forceWrite('Please input a choice index (starting from 0), and press enter', '\n')
            else:
                forceWrite('Please select a choice using the arrow or number keys, and selecting with enter', '\n')
        self.position = default_choice
        for i in range(len(self.choices)):
            self.print_choice(i)
            forceWrite('\n')
        # move the cursor back up onto the selected row
        move_cursor(len(self.choices) - self.position, 'UP')
        with cursor.hide():
            while True:
                if in_colab:
                    try:
                        choice = int(builtins.input())
                    except ValueError:
                        choice = default_choice
                else:
                    choice = self.handle_input()
                if choice is not None:
                    reset_cursor()
                    # erase the menu before echoing the final choice
                    for _ in range(len(self.choices) + 1):
                        move_cursor(1, 'UP')
                        clear_line()
                    self.write_choice(choice, '\n')
                    return choice
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase : List[Any] =16  # NOTE(review): presumably MAX_GPU_BATCH_SIZE — both constants are bound to the same name and never read below; verify
lowerCamelCase : int =32  # NOTE(review): presumably EVAL_BATCH_SIZE
def get_dataloaders(accelerator, batch_size: int = 16, model_name_or_path: str = "bert-base-cased") -> Union[str, Any]:
    """Build train/eval DataLoaders for GLUE MRPC tokenized with the given model's tokenizer.

    Fixes over the previous revision: the ``def`` had three parameters all
    named ``__lowerCAmelCase`` (a SyntaxError) and every local was assigned to
    a throwaway name while the body read ``tokenizer``/``datasets``/
    ``tokenized_datasets``/``train_dataloader``; the function is renamed to
    ``get_dataloaders``, the name its caller already uses.

    Returns ``(train_dataloader, eval_dataloader)``.
    """
    tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False)
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size)
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size)
    return train_dataloader, eval_dataloader
def training_function(config, args) -> List[str]:
    """Train/evaluate BERT on MRPC under Accelerate (optionally DeepSpeed) and check accuracy.

    Fixes over the previous revision: duplicate ``__lowerCAmelCase`` parameter
    names (a SyntaxError) and locals assigned to throwaway names while the body
    read the real ones (``accelerator``, ``optimizer``, ``lr_scheduler``, ...);
    renamed to ``training_function``, the name ``main`` already calls.

    config: dict with "lr", "num_epochs", "seed", "batch_size".
    args:   parsed CLI namespace (model_name_or_path, output_dir,
            performance_lower_bound, num_epochs).
    """
    # Initialize accelerator
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name_or_path = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name_or_path)
    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name_or_path, return_dict=True)

    # Instantiate optimizer: DeepSpeed may own the optimizer via its config,
    # in which case a DummyOptim placeholder is required.
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler (same DeepSpeed-placeholder logic as the optimizer)
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer, num_warmup_steps=0, num_training_steps=max_training_steps, )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler)

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the stating epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load("glue", "mrpc")
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch, num_epochs):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()
            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device)
            with torch.no_grad():
                outputs = model(**batch)
            predictions = outputs.logits.argmax(dim=-1)
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch["labels"]))  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader) - 1:
                    # drop the duplicated tail of the final gathered batch
                    predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                    references = references[: len(eval_dataloader.dataset) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions, references=references, )
        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(f'epoch {epoch}:', eval_metric)
        # record per-epoch accuracy; keyed by epoch so all_results.json is self-describing
        performance_metric[f"epoch-{epoch}"] = eval_metric["accuracy"]
        if best_performance < eval_metric["accuracy"]:
            best_performance = eval_metric["accuracy"]

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), f'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir, "all_results.json"), "w") as f:
            json.dump(performance_metric, f)
def main() -> Union[str, Any]:
    """Parse CLI args and launch :func:`training_function`.

    Fixes over the previous revision: the entry point was named
    ``SCREAMING_SNAKE_CASE`` (shadowing two sibling defs) while the
    ``__main__`` guard calls ``main``; ``parser``/``args``/``config`` were
    never bound; ``type=``/``default=``/``required=`` used undefined names
    (restored to their natural values).
    """
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path", type=str, default="bert-base-cased", help="Path to pretrained model or model identifier from huggingface.co/models.", required=False, )
    parser.add_argument(
        "--output_dir", type=str, default=".", help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.", )
    parser.add_argument(
        "--performance_lower_bound", type=float, default=None, help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.", )
    parser.add_argument(
        "--num_epochs", type=int, default=3, help="Number of train epochs.", )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
    training_function(config, args)
if __name__ == "__main__":
    main()
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import flax
import jax
import jax.numpy as jnp
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
CommonSchedulerState,
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
add_noise_common,
get_velocity_common,
)
@flax.struct.dataclass
class DDPMSchedulerState:
    """Immutable state threaded through the Flax DDPM scheduler's functional API.

    Fixes over the previous revision: the class was named ``__a`` while later
    code references ``DDPMSchedulerState``; all four fields shared the single
    name ``_lowerCAmelCase`` (only the last survived) while the rest of the
    file reads ``state.common`` / ``state.init_noise_sigma`` /
    ``state.timesteps`` and calls ``state.replace(num_inference_steps=...)``;
    and the factory classmethod is invoked as ``create``.
    """

    common: CommonSchedulerState
    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        """Build a state with no inference schedule set yet."""
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)
@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    """Output of a DDPM scheduler step.

    Fixes over the previous revision: the base class was the undefined name
    ``A__`` (restored to the imported ``FlaxSchedulerOutput``, which supplies
    ``prev_sample``), the class was named ``__a`` while the step method
    constructs ``FlaxDDPMSchedulerOutput(...)``, and the field was bound to
    ``_lowerCAmelCase`` while callers pass ``state=``.
    """

    # Scheduler state to thread into the next `step` call.
    state: DDPMSchedulerState
class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising Diffusion Probabilistic Models (DDPM) scheduler, Flax/functional variant.

    Fixes over the previous revision: the class was ``__a(A__, A__)`` with
    ``A__`` undefined (bases restored to the imported mixins); every method was
    named ``__lowercase`` so each definition shadowed the previous one; locals
    and parameters were bound to throwaway names while the bodies read the real
    ones; the trailing ``__len__`` line had extraction junk fused onto it.
    Method names are restored from in-body references (``self._get_variance``,
    ``DDPMSchedulerState.create``, ``state.replace`` and the error message
    naming ``FlaxDDPMScheduler``) — layout follows diffusers'
    ``scheduling_ddpm_flax``.
    """

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        """This scheduler carries explicit functional state."""
        return True

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 10_00,
        beta_start: float = 0.0_0_0_1,
        beta_end: float = 0.0_2,
        beta_schedule: str = "linear",
        trained_betas: Optional[jnp.ndarray] = None,
        variance_type: str = "fixed_small",
        clip_sample: bool = True,
        prediction_type: str = "epsilon",
        dtype: jnp.dtype = jnp.floataa,
    ):
        # All hyper-parameters are captured into self.config by
        # @register_to_config; only the dtype is stored directly.
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        """Create the initial scheduler state (betas/alphas plus full training schedule)."""
        if common is None:
            common = CommonSchedulerState.create(self)
        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)
        # full training schedule, reversed (denoising runs T-1 .. 0)
        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]
        return DDPMSchedulerState.create(
            common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps, )

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        """DDPM applies no input scaling; returns the sample unchanged."""
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        """Return a state with a reduced, evenly-strided inference schedule."""
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_step is power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]
        return state.replace(
            num_inference_steps=num_inference_steps, timesteps=timesteps, )

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        """Compute the per-step noise variance according to ``variance_type``."""
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-2_0)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-2_0))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            # interpolate between the min and max log-variance with the model output
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(
        self,
        state: DDPMSchedulerState,
        model_output: jnp.ndarray,
        timestep: int,
        sample: jnp.ndarray,
        key: Optional[jax.random.KeyArray] = None,
        return_dict: bool = True,
    ):
        """Predict the previous-timestep sample from the model output (one reverse step).

        Returns a FlaxDDPMSchedulerOutput (or a ``(prev_sample, state)`` tuple
        when ``return_dict`` is False).
        """
        t = timestep
        if key is None:
            key = jax.random.PRNGKey(0)

        # models that learn the variance emit 2x channels: [mean, variance]
        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                F'prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` '
                " for the FlaxDDPMScheduler.")

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        # no noise is added at t == 0 (final denoising step)
        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(
        self,
        state: DDPMSchedulerState,
        original_samples: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Diffuse clean samples forward to the given timesteps."""
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(
        self,
        state: DDPMSchedulerState,
        sample: jnp.ndarray,
        noise: jnp.ndarray,
        timesteps: jnp.ndarray,
    ) -> jnp.ndarray:
        """Compute the v-prediction target for the given samples/noise."""
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
"""simple docstring"""
# XXX: we want transformers master here - in the absense of conftest manipulating sys.path:
# hack it in for now:
import sys
from pathlib import Path
_a : List[str] = Path(__file__).resolve().parents[3] / 'src'
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(42)

# checkpoint aliases used by run_and_check (fix: these module constants were
# all rebound to `_a` while later code reads `models` and `stages`, and the
# ZeRO stage names lost their 2/3 distinction)
models = {'base': 'patrickvonplaten/wav2vec2_tiny_random', 'robust': 'patrickvonplaten/wav2vec2_tiny_random_robust'}

ZERO2 = 'zero2'
ZERO3 = 'zero3'
stages = [ZERO2, ZERO3]
def SCREAMING_SNAKE_CASE(func, param_num, param) -> List[str]:
    """`parameterized` name_func: include every param in the sub-test name.

    Fixes over the previous revision: the three parameters all shared the name
    ``_lowerCamelCase`` (a SyntaxError) while the body read ``func`` and
    ``param``; the joined string was bound to a throwaway name while the
    f-string read ``param_based_name``; and the generator stringified the
    unbound ``_lowerCamelCase`` instead of its own loop variable ``x``.
    """
    # customize the test name generator function as we want both params to appear in the sub-test
    # name, as by default it shows only the first param
    param_based_name = parameterized.to_safe_name("""_""".join(str(x) for x in param.args))
    return f"{func.__name__}_{param_based_name}"
# Cartesian-product of zero stages with models to test
# NOTE(review): `stages`/`models` are not bound under those names above (the
# module constants were mangled to `_a`), and this product is itself assigned
# to `_a` although @parameterized.expand presumably consumed it as `params`
# originally — verify.
_a : Tuple = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class __A ( SCREAMING_SNAKE_CASE_ ):
    @parameterized.expand(a__ , name_func=a__ )
    def __A ( self , a__ , a__ ):
        # NOTE(review): duplicate parameter names `a__` make this `def` a
        # SyntaxError, and all four parameterized test methods share the name
        # `__A` — presumably (stage, model) tests with distinct fp16/distributed
        # combinations originally; verify.
        self.run_and_check(
            stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
    @require_torch_multi_gpu
    @parameterized.expand(a__ , name_func=a__ )
    def __A ( self , a__ , a__ ):
        # NOTE(review): duplicate `a__` parameter names (SyntaxError) and a
        # shadowed method name `__A` — multi-GPU variant of the test above; verify.
        self.run_and_check(
            stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
@parameterized.expand(a__ , name_func=a__ )
def __A ( self , a__ , a__ ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
@require_torch_multi_gpu
@parameterized.expand(a__ , name_func=a__ )
def __A ( self , a__ , a__ ):
self.run_and_check(
stage=a__ , model=a__ , distributed=a__ , fpaa=a__ , )
def __A ( self , a__ ):
# XXX: run_asr is premature and doesn't save any results
# so all we check for now is that the process didn't fail
pass
def __A ( self , a__ , a__ , a__ = 10 , a__ = True , a__ = True , a__ = True , ):
_lowerCAmelCase : Union[str, Any] = models[model]
_lowerCAmelCase : Union[str, Any] = self.run_trainer(
stage=a__ , model_name=a__ , eval_steps=a__ , num_train_epochs=1 , distributed=a__ , fpaa=a__ , )
self.do_checks(a__ )
return output_dir
def __A ( self , a__ , a__ , a__ = 10 , a__ = 1 , a__ = True , a__ = True , ):
_lowerCAmelCase : Union[str, Any] = self.get_auto_remove_tmp_dir("""./xxx""" , after=a__ )
_lowerCAmelCase : Tuple = F"\n --model_name_or_path {model_name}\n --dataset_name hf-internal-testing/librispeech_asr_dummy\n --dataset_config_name clean\n --train_split_name validation\n --validation_split_name validation\n --output_dir {output_dir}\n --num_train_epochs {str(a__ )}\n --per_device_train_batch_size 2\n --per_device_eval_batch_size 2\n --evaluation_strategy steps\n --learning_rate 5e-4\n --warmup_steps 8\n --orthography timit\n --preprocessing_num_workers 1\n --group_by_length\n --freeze_feature_extractor\n --report_to none\n --save_steps 0\n --eval_steps {eval_steps}\n --report_to none\n ".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_lowerCAmelCase : int = F"--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json".split()
_lowerCAmelCase : Union[str, Any] = [F"{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"]
_lowerCAmelCase : Tuple = self.get_launcher(a__ )
_lowerCAmelCase : Dict = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(a__ , env=self.get_env() )
return output_dir
def __A ( self , a__=False ):
# 1. explicitly set --num_nodes=1 just in case these tests end up run on a multi-node setup
# - it won't be able to handle that
# 2. for now testing with just 2 gpus max (since some quality tests may give different
# results with mode gpus because we use very little data)
_lowerCAmelCase : str = min(2 , get_gpu_count() ) if distributed else 1
return F"deepspeed --num_nodes 1 --num_gpus {num_gpus}".split()
| 44 | """simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
# Module logger; the second original `_a` assignment shadowed the first,
# so the logger and the archive map need distinct names.
logger = logging.get_logger(__name__)

# Canonical pretrained config locations on the Hub.
SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'microsoft/swinv2-tiny-patch4-window8-256': (
        'https://huggingface.co/microsoft/swinv2-tiny-patch4-window8-256/resolve/main/config.json'
    ),
}
class __A(SCREAMING_SNAKE_CASE_):
    """Swinv2 model configuration.

    Defaults yield a configuration comparable to
    microsoft/swinv2-tiny-patch4-window8-256.  The original block declared
    duplicate parameter names (a SyntaxError) and two class attributes under
    one name; both are restored here.
    """

    model_type = "swinv2"
    # Map generic transformer attribute names onto the Swin-specific ones.
    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        image_size=224,
        patch_size=4,
        num_channels=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],  # NOTE: mutable default is only ever read, never mutated
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        encoder_stride=32,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swinv2 work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.pretrained_window_sizes = (0, 0, 0, 0)
| 44 | 1 |
'''Maclaurin-series approximations of sin and cos.'''
from math import factorial, pi


def maclaurin_sin(theta: float, accuracy: int = 30) -> float:
    """Approximate sin(theta) with `accuracy` terms of the Maclaurin series.

    Raises ValueError for a non-numeric theta or a non-positive/non-int
    accuracy.
    """
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_sin() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_sin() requires a positive int for accuracy''')
    theta = float(theta)
    # reduce theta by whole periods so the truncated series converges quickly
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum(
        (-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1) for r in range(accuracy)
    )


def maclaurin_cos(theta: float, accuracy: int = 30) -> float:
    """Approximate cos(theta) with `accuracy` terms of the Maclaurin series."""
    if not isinstance(theta, (int, float)):
        raise ValueError('''maclaurin_cos() requires either an int or float for theta''')
    if not isinstance(accuracy, int) or accuracy <= 0:
        raise ValueError('''maclaurin_cos() requires a positive int for accuracy''')
    theta = float(theta)
    div = theta // (2 * pi)
    theta -= 2 * div * pi
    return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r) for r in range(accuracy))


# Backwards-compat alias: both helpers were previously published under this one
# (shadowed) name; the cosine version was the surviving binding.
_UpperCAmelCase = maclaurin_cos


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(maclaurin_sin(10))
    print(maclaurin_sin(-10))
    print(maclaurin_sin(10, 15))
    print(maclaurin_sin(-10, 15))
    print(maclaurin_cos(5))
    print(maclaurin_cos(-5))
    print(maclaurin_cos(10, 15))
    print(maclaurin_cos(-10, 15))
| 18 | '''simple docstring'''
# Package init for the Shap-E pipelines: import the real implementations when
# both `transformers` and `torch` are installed, otherwise fall back to dummy
# placeholder objects that raise a helpful error on use.
from ...utils import (
    OptionalDependencyNotAvailable,
    is_torch_available,
    is_transformers_available,
    is_transformers_version,
)


try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    # Dummy stand-in: raises an informative ImportError when instantiated.
    from ...utils.dummy_torch_and_transformers_objects import ShapEPipeline
else:
    from .camera import create_pan_cameras
    from .pipeline_shap_e import ShapEPipeline
    from .pipeline_shap_e_img2img import ShapEImgaImgPipeline
    from .renderer import (
        BoundingBoxVolume,
        ImportanceRaySampler,
        MLPNeRFModelOutput,
        MLPNeRSTFModel,
        ShapEParamsProjModel,
        ShapERenderer,
        StratifiedRaySampler,
        VoidNeRFModel,
    )
| 18 | 1 |
import argparse
import os
import shutil
from pathlib import Path
import onnx
import torch
from packaging import version
from torch.onnx import export
from diffusers import OnnxRuntimeModel, OnnxStableDiffusionPipeline, StableDiffusionPipeline
# True for torch < 1.11, where `torch.onnx.export` still accepted the
# `use_external_data_format` / `enable_onnx_checker` kwargs.  The value is
# read below under this name, so it must not be bound to a throwaway one.
is_torch_less_than_1_11 = version.parse(version.parse(torch.__version__).base_version) < version.parse('1.11')
def onnx_export(
    model,
    model_args: tuple,
    output_path: Path,
    ordered_input_names,
    output_names,
    dynamic_axes,
    opset,
    use_external_data_format=False,
):
    """Export `model` to ONNX at `output_path` via `torch.onnx.export`.

    Creates the parent directory, then dispatches on the torch version since
    v1.11 deprecated the `enable_onnx_checker`/`use_external_data_format`
    arguments.
    """
    output_path.parent.mkdir(parents=True, exist_ok=True)
    # PyTorch deprecated the `enable_onnx_checker` and `use_external_data_format` arguments in v1.11,
    # so we check the torch version for backwards compatibility
    if is_torch_less_than_1_11:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            use_external_data_format=use_external_data_format,
            enable_onnx_checker=True,
            opset_version=opset,
        )
    else:
        export(
            model,
            model_args,
            f=output_path.as_posix(),
            input_names=ordered_input_names,
            output_names=output_names,
            dynamic_axes=dynamic_axes,
            do_constant_folding=True,
            opset_version=opset,
        )


# Keep the previous (garbled) public name bound for any stray callers.
A = onnx_export
@torch.no_grad()
def convert_models(model_path: str, output_path: str, opset: int, fp16: bool = False):
    """Export every sub-model of a Stable Diffusion checkpoint to ONNX.

    Loads the `diffusers` pipeline from `model_path`, exports the text
    encoder, UNet, VAE encoder/decoder and (optional) safety checker under
    `output_path`, then reloads the exported pipeline to verify it is
    loadable.  fp16 export requires CUDA.
    """
    dtype = torch.float16 if fp16 else torch.float32
    if fp16 and torch.cuda.is_available():
        device = "cuda"
    elif fp16 and not torch.cuda.is_available():
        raise ValueError("`float16` model export is only supported on GPUs with CUDA")
    else:
        device = "cpu"
    pipeline = StableDiffusionPipeline.from_pretrained(model_path, torch_dtype=dtype).to(device)
    output_path = Path(output_path)

    # TEXT ENCODER
    num_tokens = pipeline.text_encoder.config.max_position_embeddings
    text_hidden_size = pipeline.text_encoder.config.hidden_size
    text_input = pipeline.tokenizer(
        "A sample prompt",
        padding="max_length",
        max_length=pipeline.tokenizer.model_max_length,
        truncation=True,
        return_tensors="pt",
    )
    onnx_export(
        pipeline.text_encoder,
        # int32 token ids — presumably to match the exported CLIP graph; confirm
        model_args=(text_input.input_ids.to(device=device, dtype=torch.int32)),
        output_path=output_path / "text_encoder" / "model.onnx",
        ordered_input_names=["input_ids"],
        output_names=["last_hidden_state", "pooler_output"],
        dynamic_axes={
            "input_ids": {0: "batch", 1: "sequence"},
        },
        opset=opset,
    )
    del pipeline.text_encoder

    # UNET
    unet_in_channels = pipeline.unet.config.in_channels
    unet_sample_size = pipeline.unet.config.sample_size
    unet_path = output_path / "unet" / "model.onnx"
    onnx_export(
        pipeline.unet,
        model_args=(
            torch.randn(2, unet_in_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            torch.randn(2).to(device=device, dtype=dtype),
            torch.randn(2, num_tokens, text_hidden_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=unet_path,
        ordered_input_names=["sample", "timestep", "encoder_hidden_states", "return_dict"],
        output_names=["out_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
            "timestep": {0: "batch"},
            "encoder_hidden_states": {0: "batch", 1: "sequence"},
        },
        opset=opset,
        use_external_data_format=True,  # UNet weights can exceed the 2GB protobuf limit
    )
    unet_model_path = str(unet_path.absolute().as_posix())
    unet_dir = os.path.dirname(unet_model_path)
    unet = onnx.load(unet_model_path)
    # clean up existing tensor files
    shutil.rmtree(unet_dir)
    os.mkdir(unet_dir)
    # collate external tensor files into one
    onnx.save_model(
        unet,
        unet_model_path,
        save_as_external_data=True,
        all_tensors_to_one_file=True,
        location="weights.pb",
        convert_attribute=False,
    )
    del pipeline.unet

    # VAE ENCODER
    vae_encoder = pipeline.vae
    vae_in_channels = vae_encoder.config.in_channels
    vae_sample_size = vae_encoder.config.sample_size
    # need to get the raw tensor output (sample) from the encoder
    vae_encoder.forward = lambda sample, return_dict: vae_encoder.encode(sample, return_dict)[0].sample()
    onnx_export(
        vae_encoder,
        model_args=(
            torch.randn(1, vae_in_channels, vae_sample_size, vae_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_encoder" / "model.onnx",
        ordered_input_names=["sample", "return_dict"],
        output_names=["latent_sample"],
        dynamic_axes={
            "sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )

    # VAE DECODER
    vae_decoder = pipeline.vae
    vae_latent_channels = vae_decoder.config.latent_channels
    vae_out_channels = vae_decoder.config.out_channels
    # forward only through the decoder part
    vae_decoder.forward = vae_encoder.decode
    onnx_export(
        vae_decoder,
        model_args=(
            torch.randn(1, vae_latent_channels, unet_sample_size, unet_sample_size).to(device=device, dtype=dtype),
            False,
        ),
        output_path=output_path / "vae_decoder" / "model.onnx",
        ordered_input_names=["latent_sample", "return_dict"],
        output_names=["sample"],
        dynamic_axes={
            "latent_sample": {0: "batch", 1: "channels", 2: "height", 3: "width"},
        },
        opset=opset,
    )
    del pipeline.vae

    # SAFETY CHECKER
    if pipeline.safety_checker is not None:
        safety_checker = pipeline.safety_checker
        clip_num_channels = safety_checker.config.vision_config.num_channels
        clip_image_size = safety_checker.config.vision_config.image_size
        safety_checker.forward = safety_checker.forward_onnx
        onnx_export(
            pipeline.safety_checker,
            model_args=(
                torch.randn(1, clip_num_channels, clip_image_size, clip_image_size).to(device=device, dtype=dtype),
                torch.randn(1, vae_sample_size, vae_sample_size, vae_out_channels).to(device=device, dtype=dtype),
            ),
            output_path=output_path / "safety_checker" / "model.onnx",
            ordered_input_names=["clip_input", "images"],
            output_names=["out_images", "has_nsfw_concepts"],
            dynamic_axes={
                "clip_input": {0: "batch", 1: "channels", 2: "height", 3: "width"},
                "images": {0: "batch", 1: "height", 2: "width", 3: "channels"},
            },
            opset=opset,
        )
        del pipeline.safety_checker
        safety_checker = OnnxRuntimeModel.from_pretrained(output_path / "safety_checker")
        feature_extractor = pipeline.feature_extractor
    else:
        safety_checker = None
        feature_extractor = None

    onnx_pipeline = OnnxStableDiffusionPipeline(
        vae_encoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_encoder"),
        vae_decoder=OnnxRuntimeModel.from_pretrained(output_path / "vae_decoder"),
        text_encoder=OnnxRuntimeModel.from_pretrained(output_path / "text_encoder"),
        tokenizer=pipeline.tokenizer,
        unet=OnnxRuntimeModel.from_pretrained(output_path / "unet"),
        scheduler=pipeline.scheduler,
        safety_checker=safety_checker,
        feature_extractor=feature_extractor,
        requires_safety_checker=safety_checker is not None,
    )
    onnx_pipeline.save_pretrained(output_path)
    print("ONNX pipeline saved to", output_path)

    del pipeline
    del onnx_pipeline
    # Smoke-test: the exported pipeline must be loadable on CPU.
    _ = OnnxStableDiffusionPipeline.from_pretrained(output_path, provider="CPUExecutionProvider")
    print("ONNX pipeline is loadable")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--model_path',
        type=str,
        required=True,
        help='Path to the `diffusers` checkpoint to convert (either a local directory or on the Hub).',
    )
    parser.add_argument('--output_path', type=str, required=True, help='Path to the output model.')
    parser.add_argument(
        '--opset',
        default=14,
        type=int,
        help='The version of the ONNX operator set to use.',
    )
    parser.add_argument('--fp16', action='store_true', default=False, help='Export the models in `float16` mode')
    args = parser.parse_args()
    # argparse stores `--fp16` under `args.fp16` (the original read `args.fpaa`).
    convert_models(args.model_path, args.output_path, args.opset, args.fp16)
| 48 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO), """Tatoeba directory does not exist.""")
class UpperCamelCase__(unittest.TestCase):
    """Smoke tests for the Tatoeba -> Marian conversion utilities."""

    @cached_property
    def resolver(self):
        """Lazily build a converter that writes into a throw-away temp dir."""
        tmp_dir = tempfile.mkdtemp()
        return TatoebaConverter(save_dir=tmp_dir)

    @slow
    def test_resolver_convert(self):
        # Only checks that conversion runs without raising.
        self.resolver.convert_models(["heb-eng"])

    @slow
    def test_model_card(self):
        content, mmeta = self.resolver.write_model_card("opus-mt-he-en", dry_run=True)
        assert mmeta["long_pair"] == "heb-eng"
| 48 | 1 |
import argparse
import os
import sys
from unittest.mock import patch
import pytorch_lightning as pl
import timeout_decorator
import torch
from distillation import SummarizationDistiller, distill_main
from finetune import SummarizationModule, main
from transformers import MarianMTModel
from transformers.file_utils import cached_path
from transformers.testing_utils import TestCasePlus, require_torch_gpu, slow
from utils import load_json
# Student checkpoint used by the training test below (read as MARIAN_MODEL).
MARIAN_MODEL = '''sshleifer/mar_enro_6_3_student'''
class A(A_):
    """End-to-end test of the `finetune.py` en-ro training bash script."""

    def setUp(self):
        # Must be named setUp so unittest actually runs it (the garbled
        # original name meant the dataset was never downloaded).
        super().setUp()
        data_cached = cached_path(
            'https://cdn-datasets.huggingface.co/translation/wmt_en_ro-tr40k-va0.5k-te0.5k.tar.gz',
            extract_compressed_file=True,
        )
        self.data_dir = f'{data_cached}/wmt_en_ro-tr40k-va0.5k-te0.5k'

    @slow
    @require_torch_gpu
    def test_model_download(self):
        """Warm the model cache so the training test does not stall on download."""
        MarianMTModel.from_pretrained(MARIAN_MODEL)

    @slow
    @require_torch_gpu
    def test_train_mbart_cc25_enro_script(self):
        env_vars_to_replace = {
            '$MAX_LEN': 64,
            '$BS': 64,
            '$GAS': 1,
            '$ENRO_DIR': self.data_dir,
            'facebook/mbart-large-cc25': MARIAN_MODEL,
            # "val_check_interval=0.25": "val_check_interval=1.0",
            '--learning_rate=3e-5': '--learning_rate 3e-4',
            '--num_train_epochs 6': '--num_train_epochs 1',
        }
        # Clean up bash script
        bash_script = (self.test_file_dir / 'train_mbart_cc25_enro.sh').open().read().split('finetune.py')[1].strip()
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        # bash_script = bash_script.replace("--fp16 ", "")
        args = f'\n --output_dir {output_dir}\n --tokenizer_name Helsinki-NLP/opus-mt-en-ro\n --sortish_sampler\n --do_predict\n --gpus 1\n --freeze_encoder\n --n_train 40000\n --n_val 500\n --n_test 500\n --fp16_opt_level O1\n --num_sanity_val_steps 0\n --eval_beams 2\n '.split()
        # XXX: args.gpus > 1 : handle multi_gpu in the future
        testargs = ['finetune.py'] + bash_script.split() + args
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationModule.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            model = main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        self.assertEqual(len(metrics['val']), (args.max_epochs / args.val_check_interval))
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
        self.assertGreater(last_step_stats['val_avg_gen_time'], 0.01)
        # model hanging on generate. Maybe bad config was saved. (XXX: old comment/assert?)
        self.assertLessEqual(last_step_stats['val_avg_gen_time'], 1.0)
        # test learning requirements:
        # 1. BLEU improves over the course of training by more than 2 pts
        self.assertGreater(last_step_stats['val_avg_bleu'] - first_step_stats['val_avg_bleu'], 2)
        # 2. BLEU finishes above 17
        self.assertGreater(last_step_stats['val_avg_bleu'], 17)
        # 3. test BLEU and val BLEU within ~1.1 pt.
        self.assertLess(abs(metrics['val'][-1]['val_avg_bleu'] - metrics['test'][-1]['test_avg_bleu']), 1.1)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith('.ckpt')][0]
        ckpt_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(ckpt_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
class TestDistilMarianNoTeacher(A_):
    """End-to-end test of the no-teacher `distillation.py` bash script.

    Renamed from the duplicate class name `A`, which shadowed the previous
    test class and hid it from test discovery.
    """

    @timeout_decorator.timeout(600)
    @slow
    @require_torch_gpu
    def test_opus_mt_distill_script(self):
        data_dir = f'{self.test_file_dir_str}/test_data/wmt_en_ro'
        env_vars_to_replace = {
            '--fp16_opt_level=O1': '',
            '$MAX_LEN': 128,
            '$BS': 16,
            '$GAS': 1,
            '$ENRO_DIR': data_dir,
            '$m': 'sshleifer/student_marian_en_ro_6_1',
            'val_check_interval=0.25': 'val_check_interval=1.0',
        }
        # Clean up bash script
        bash_script = (
            (self.test_file_dir / 'distil_marian_no_teacher.sh').open().read().split('distillation.py')[1].strip()
        )
        bash_script = bash_script.replace('\\\n', '').strip().replace('"$@"', '')
        bash_script = bash_script.replace('--fp16 ', ' ')
        for k, v in env_vars_to_replace.items():
            bash_script = bash_script.replace(k, str(v))
        output_dir = self.get_auto_remove_tmp_dir()
        bash_script = bash_script.replace('--fp16', '')
        epochs = 6
        testargs = (
            ['distillation.py']
            + bash_script.split()
            + [
                f'--output_dir={output_dir}',
                '--gpus=1',
                '--learning_rate=1e-3',
                f'--num_train_epochs={epochs}',
                '--warmup_steps=10',
                '--val_check_interval=1.0',
                '--do_predict',
            ]
        )
        with patch.object(sys, 'argv', testargs):
            parser = argparse.ArgumentParser()
            parser = pl.Trainer.add_argparse_args(parser)
            parser = SummarizationDistiller.add_model_specific_args(parser, os.getcwd())
            args = parser.parse_args()
            # assert args.gpus == gpus THIS BREAKS for multi_gpu
            model = distill_main(args)
        # Check metrics
        metrics = load_json(model.metrics_save_path)
        first_step_stats = metrics['val'][0]
        last_step_stats = metrics['val'][-1]
        assert len(metrics['val']) >= (args.max_epochs / args.val_check_interval)  # +1 accounts for val_sanity_check
        assert last_step_stats["val_avg_gen_time"] >= 0.01
        assert first_step_stats["val_avg_bleu"] < last_step_stats["val_avg_bleu"]  # model learned nothing
        assert 1.0 >= last_step_stats["val_avg_gen_time"]  # model hanging on generate. Maybe bad config was saved.
        assert isinstance(last_step_stats[f'val_avg_{model.val_metric}'], float)
        # check lightning ckpt can be loaded and has a reasonable statedict
        contents = os.listdir(output_dir)
        ckpt_name = [x for x in contents if x.endswith('.ckpt')][0]
        ckpt_path = os.path.join(args.output_dir, ckpt_name)
        ckpt = torch.load(ckpt_path, map_location='cpu')
        expected_key = 'model.model.decoder.layers.0.encoder_attn_layer_norm.weight'
        assert expected_key in ckpt["state_dict"]
        assert ckpt["state_dict"]["model.model.decoder.layers.0.encoder_attn_layer_norm.weight"].dtype == torch.float32
        # TODO: turn on args.do_predict when PL bug fixed.
        if args.do_predict:
            contents = {os.path.basename(p) for p in contents}
            assert "test_generations.txt" in contents
            assert "test_results.txt" in contents
            # assert len(metrics["val"]) == desired_n_evals
            assert len(metrics['test']) == 1
| 304 |
from typing import Any

import numpy as np


def is_hermitian(matrix: np.ndarray) -> bool:
    """Return True iff `matrix` equals its own conjugate transpose."""
    return np.array_equal(matrix, matrix.conjugate().T)


def rayleigh_quotient(a: np.ndarray, v: np.ndarray) -> Any:
    """Return the Rayleigh quotient (v* A v) / (v* v) for matrix `a`, column vector `v`."""
    v_star = v.conjugate().T
    v_star_dot = v_star.dot(a)
    assert isinstance(v_star_dot, np.ndarray)
    return (v_star_dot.dot(v)) / (v_star.dot(v))


def tests() -> None:
    a = np.array([[2, 2 + 1j, 4], [2 - 1j, 3, 1j], [4, -1j, 1]])
    v = np.array([[1], [2], [3]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    print(rayleigh_quotient(a, v))

    a = np.array([[1, 2, 4], [2, 3, -1], [4, -1, 1]])
    assert is_hermitian(a), f'{a} is not hermitian.'
    assert rayleigh_quotient(a, v) == float(3)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    tests()
| 304 | 1 |
class Node:
    """A named value used as a min-heap entry; ordered by `val`.

    Renamed from the duplicate `__snake_case` so the module-level demo's
    `Node(...)` calls resolve.
    """

    def __init__(self, name, val):
        self.name = name
        self.val = val

    def __str__(self):
        return f"{self.__class__.__name__}({self.name}, {self.val})"

    def __lt__(self, other):
        return self.val < other.val
class MinHeap:
    """Array-backed min-heap of node objects (need `.name`, `.val`, `__lt__`).

    `idx_of_element` tracks each node's array index so `decrease_key` can
    re-sift in O(log n); `heap_dict` maps node name -> current value for
    `heap[name]` lookups.
    """

    def __init__(self, array):
        self.idx_of_element = {}
        self.heap_dict = {}
        self.heap = self.build_heap(array)

    def __getitem__(self, key):
        # Lookup by element *name*, e.g. heap["B"] -> current value of node B.
        return self.get_value(key)

    def get_parent_idx(self, idx):
        return (idx - 1) // 2

    def get_left_child_idx(self, idx):
        return idx * 2 + 1

    def get_right_child_idx(self, idx):
        return idx * 2 + 2

    def get_value(self, key):
        return self.heap_dict[key]

    def build_heap(self, array):
        """Heapify `array` in place (bottom-up) and return it."""
        last_idx = len(array) - 1
        start_from = self.get_parent_idx(last_idx)
        for idx, node in enumerate(array):
            self.idx_of_element[node] = idx
            self.heap_dict[node.name] = node.val
        for i in range(start_from, -1, -1):
            self.sift_down(i, array)
        return array

    def sift_down(self, idx, array):
        """Move array[idx] down until the min-heap property holds below it."""
        while True:
            left = self.get_left_child_idx(idx)
            right = self.get_right_child_idx(idx)
            smallest = idx
            if left < len(array) and array[left] < array[idx]:
                smallest = left
            if right < len(array) and array[right] < array[smallest]:
                smallest = right
            if smallest == idx:
                break
            array[idx], array[smallest] = array[smallest], array[idx]
            # keep the index map in sync with the swapped elements
            self.idx_of_element[array[idx]], self.idx_of_element[array[smallest]] = (
                self.idx_of_element[array[smallest]],
                self.idx_of_element[array[idx]],
            )
            idx = smallest

    def sift_up(self, idx):
        """Move heap[idx] up while it is smaller than its parent."""
        p = self.get_parent_idx(idx)
        while p >= 0 and self.heap[p] > self.heap[idx]:
            self.heap[p], self.heap[idx] = self.heap[idx], self.heap[p]
            self.idx_of_element[self.heap[p]], self.idx_of_element[self.heap[idx]] = (
                self.idx_of_element[self.heap[idx]],
                self.idx_of_element[self.heap[p]],
            )
            idx = p
            p = self.get_parent_idx(idx)

    def peek(self):
        """Return (without removing) the minimum element."""
        return self.heap[0]

    def remove(self):
        """Pop and return the minimum element."""
        self.heap[0], self.heap[-1] = self.heap[-1], self.heap[0]
        self.idx_of_element[self.heap[0]], self.idx_of_element[self.heap[-1]] = (
            self.idx_of_element[self.heap[-1]],
            self.idx_of_element[self.heap[0]],
        )
        top = self.heap.pop()
        del self.idx_of_element[top]
        self.sift_down(0, self.heap)
        return top

    def insert(self, node):
        self.heap.append(node)
        self.idx_of_element[node] = len(self.heap) - 1
        self.heap_dict[node.name] = node.val
        self.sift_up(len(self.heap) - 1)

    def is_empty(self):
        return len(self.heap) == 0

    def decrease_key(self, node, new_value):
        assert (
            self.heap[self.idx_of_element[node]].val > new_value
        ), "newValue must be less that current value"
        node.val = new_value
        self.heap_dict[node.name] = new_value
        self.sift_up(self.idx_of_element[node])
# Demo / smoke test: build a heap, then decrease B's key below the minimum.
r = Node('''R''', -1)
b = Node('''B''', 6)
a = Node('''A''', 3)
x = Node('''X''', 1)
e = Node('''E''', 4)
# Use one of these two ways to generate Min-Heap

# Generating Min-Heap from array
my_min_heap = MinHeap([r, b, a, x, e])

# Generating Min-Heap by Insert method
# myMinHeap.insert(a)
# myMinHeap.insert(b)
# myMinHeap.insert(x)
# myMinHeap.insert(r)
# myMinHeap.insert(e)

# Before
print('''Min Heap - before decrease key''')
for i in my_min_heap.heap:
    print(i)

print('''Min Heap - After decrease key of node [B -> -17]''')
my_min_heap.decrease_key(b, -17)

# After
for i in my_min_heap.heap:
    print(i)

if __name__ == "__main__":
    import doctest

    doctest.testmod()
| 103 |
'''simple docstring'''
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class _a(__a):
    """Processor bundling a Chinese-CLIP image processor and a BERT tokenizer.

    The original block declared duplicate parameter names in `__call__`
    (a SyntaxError) and four members under the single name `A`; real member
    names are restored so `ProcessorMixin` machinery and callers work.
    """

    attributes = ["""image_processor""", """tokenizer"""]
    image_processor_class = """ChineseCLIPImageProcessor"""
    tokenizer_class = ("""BertTokenizer""", """BertTokenizerFast""")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                '''The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'''
                ''' instead.''',
                FutureWarning,
            )
            feature_extractor = kwargs.pop('''feature_extractor''')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('''You need to specify an `image_processor`.''')
        if tokenizer is None:
            raise ValueError('''You need to specify a `tokenizer`.''')

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor

    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        """Tokenize `text` and/or preprocess `images`; at least one is required."""
        if text is None and images is None:
            raise ValueError('''You have to specify either text or images. Both cannot be none.''')

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        # de-duplicate while preserving order
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '''`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.''',
            FutureWarning,
        )
        return self.image_processor_class
| 34 | 0 |
'''Bead sort (gravity sort) for lists of non-negative integers.'''


def bead_sort(sequence: list) -> list:
    """Sort `sequence` in place (and return it) using bead sort.

    Raises TypeError if any element is negative or not an int.
    """
    if any(not isinstance(x, int) or x < 0 for x in sequence):
        raise TypeError('Sequence must be list of non-negative integers')
    # each pass lets "beads" fall one rod; n passes guarantee a sorted list
    for _ in range(len(sequence)):
        for i, (rod_upper, rod_lower) in enumerate(zip(sequence, sequence[1:])):
            if rod_upper > rod_lower:
                sequence[i] -= rod_upper - rod_lower
                sequence[i + 1] += rod_upper - rod_lower
    return sequence


if __name__ == "__main__":
    assert bead_sort([5, 4, 3, 2, 1]) == [1, 2, 3, 4, 5]
    assert bead_sort([7, 9, 4, 3, 5]) == [3, 4, 5, 7, 9]
| 350 |
'''Binomial probability mass function.'''
from math import factorial


def binomial_distribution(successes: int, trials: int, prob: float) -> float:
    """Return P(X = successes) for X ~ Binomial(trials, prob).

    Raises ValueError for successes > trials, negative counts, non-int
    counts, or prob outside (0, 1).
    """
    if successes > trials:
        raise ValueError('successes must be lower or equal to trials')
    if trials < 0 or successes < 0:
        raise ValueError('the function is defined for non-negative integers')
    if not isinstance(successes, int) or not isinstance(trials, int):
        raise ValueError('the function is defined for non-negative integers')
    if not 0 < prob < 1:
        raise ValueError('prob has to be in range of 1 - 0')
    probability = (prob**successes) * ((1 - prob) ** (trials - successes))
    # Calculate the binomial coefficient: n! / k!(n-k)!
    coefficient = float(factorial(trials))
    coefficient /= factorial(successes) * factorial(trials - successes)
    return probability * coefficient


if __name__ == "__main__":
    from doctest import testmod

    testmod()
    print("Probability of 2 successes out of 4 trails")
    print("with probability of 0.75 is:", end=" ")
    print(binomial_distribution(2, 4, 0.75))
| 114 | 0 |
from __future__ import annotations
# Sample input and, aligned per index, the expected "next greatest element"
# (-1 when no greater value exists to the right).
# Fix: both lists were bound to the same mangled name, so the first was lost
# and the demo block's `arr` reference was undefined.
arr = [-10, -5, 0, 5, 5.1, 11, 13, 21, 3, 4, -21, -10, -5, -1, 0]
expect = [-5, 0, 5, 5.1, 11, 13, 21, -1, 4, -1, -10, -5, -1, 0, -1]
def a_(arr: list[float]) -> list[float]:
    """Next greatest element, brute force O(n^2): for each position scan right
    for the first strictly greater value, -1 when none exists.

    Fix: the mangled original read the undefined name ``arr`` (the parameter
    had a different name) and appended the whole input instead of the found
    element.
    """
    result = []
    arr_size = len(arr)
    for i in range(arr_size):
        next_item: float = -1
        for j in range(i + 1, arr_size):
            if arr[i] < arr[j]:
                next_item = arr[j]
                break
        result.append(next_item)
    return result
def a_(arr: list[float]) -> list[float]:
    """Next greatest element, brute force with enumerate/slicing instead of
    index arithmetic (still O(n^2)).

    Fix: the mangled original sliced the undefined name ``arr`` and appended
    the whole input instead of the found inner element.
    """
    result = []
    for i, outer in enumerate(arr):
        next_item: float = -1
        for inner in arr[i + 1 :]:
            if outer < inner:
                next_item = inner
                break
        result.append(next_item)
    return result
def a_(arr: list[float]) -> list[float]:
    """Next greatest element via a monotonic stack, O(n): scan right-to-left,
    popping stacked values that are <= the current element; the surviving top
    (if any) is the next greater value.

    Fix: the mangled original bound the stack, the result list and the result
    write all to the same shadowed name and indexed the undefined ``arr``.
    """
    arr_size = len(arr)
    stack: list[float] = []
    result: list[float] = [-1] * arr_size
    for index in reversed(range(arr_size)):
        if stack:
            while stack[-1] <= arr[index]:
                stack.pop()
                if not stack:
                    break
        if stack:
            result[index] = stack[-1]
        stack.append(arr[index])
    return result
if __name__ == "__main__":
    from doctest import testmod
    from timeit import timeit

    # NOTE(review): this demo references `next_greatest_element_slow`,
    # `next_greatest_element_fast` and `next_greatest_element`, but the three
    # implementations above are all defined under the same mangled name `a_`,
    # so every call below raises NameError as written.
    testmod()
    print(next_greatest_element_slow(arr))
    print(next_greatest_element_fast(arr))
    print(next_greatest_element(arr))

    # NOTE(review): the setup string is bound to a mangled throwaway name,
    # while the timeit calls below read the undefined name `setup` — the
    # assignment target was presumably `setup`; confirm before running.
    __UpperCamelCase : Union[str, Any] = (
        """from __main__ import arr, next_greatest_element_slow, """
        """next_greatest_element_fast, next_greatest_element"""
    )
    print(
        """next_greatest_element_slow():""",
        timeit("""next_greatest_element_slow(arr)""", setup=setup),
    )
    print(
        """next_greatest_element_fast():""",
        timeit("""next_greatest_element_fast(arr)""", setup=setup),
    )
    print(
        """ next_greatest_element():""",
        timeit("""next_greatest_element(arr)""", setup=setup),
    )
| 307 |
def a_(_A=1000) -> int:
    """Project Euler problem 1: sum of all natural numbers below ``_A`` that
    are multiples of 3 or 5 (the range may start at 3 since 0..2 contribute
    nothing).
    """
    return sum(e for e in range(3, _A) if e % 3 == 0 or e % 5 == 0)


# Conventional name expected by the demo line below, which the mangled
# definition left dangling (NameError as written).
solution = a_

if __name__ == "__main__":
    print(f"{solution() = }")
| 307 | 1 |
import unittest
import torch
from diffusers import DDIMScheduler, DDPMScheduler, UNetaDModel
from diffusers.training_utils import set_seed
from diffusers.utils.testing_utils import slow
SCREAMING_SNAKE_CASE_ = False
class UpperCamelCase__ ( unittest.TestCase ):
    """Checks that one epoch of training produces identical tensors whether the
    noise is added by a DDPM or a DDIM scheduler with matching configuration.

    Fixes(review): in the mangled original every local was bound to one
    shadowed name, both methods shared one name, and the final asserts
    compared a variable to itself; names were restored from the evident
    intent of the call sites (e.g. ``self.get_model_optimizer(resolution=32)``).
    """

    def get_model_optimizer(self, resolution=32):
        """Return a freshly seeded small UNet and an SGD optimizer over its parameters."""
        set_seed(0)
        model = UNetaDModel(sample_size=resolution, in_channels=3, out_channels=3)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.0001)
        return model, optimizer

    @slow
    def SCREAMING_SNAKE_CASE__(self):
        device = "cpu"  # ensure full determinism without setting the CUBLAS_WORKSPACE_CONFIG env variable
        ddpm_scheduler = DDPMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        ddim_scheduler = DDIMScheduler(
            num_train_timesteps=1000,
            beta_start=0.0001,
            beta_end=0.02,
            beta_schedule="linear",
            clip_sample=True,
        )
        assert ddpm_scheduler.config.num_train_timesteps == ddim_scheduler.config.num_train_timesteps

        # shared batches for DDPM and DDIM
        set_seed(0)
        clean_images = [torch.randn((4, 3, 32, 32)).clip(-1, 1).to(device) for _ in range(4)]
        noise = [torch.randn((4, 3, 32, 32)).to(device) for _ in range(4)]
        timesteps = [torch.randint(0, 1000, (4,)).long().to(device) for _ in range(4)]

        # train with a DDPM scheduler
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddpm_noisy_images = ddpm_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddpm_noise_pred = model(ddpm_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddpm_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # recreate the model and optimizer, and retry with DDIM
        model, optimizer = self.get_model_optimizer(resolution=32)
        model.train().to(device)
        for i in range(4):
            optimizer.zero_grad()
            ddim_noisy_images = ddim_scheduler.add_noise(clean_images[i], noise[i], timesteps[i])
            ddim_noise_pred = model(ddim_noisy_images, timesteps[i]).sample
            loss = torch.nn.functional.mse_loss(ddim_noise_pred, noise[i])
            loss.backward()
            optimizer.step()
        del model, optimizer

        # identical seeding and configs must give identical trajectories
        self.assertTrue(torch.allclose(ddpm_noisy_images, ddim_noisy_images, atol=1e-5))
        self.assertTrue(torch.allclose(ddpm_noise_pred, ddim_noise_pred, atol=1e-5))
| 361 |
import json
import sys
def __lowercase(input_json_file, output_md_file):
    """Render a benchmark-results JSON file as a collapsible markdown section.

    The JSON maps benchmark names to ``{metric: {"new": x, "old": y, "diff": z}}``
    dicts ("old"/"diff" optional); each benchmark becomes a one-row table of
    ``new / old (diff)`` values.

    Fixes: the original declared two parameters with the same mangled name
    (SyntaxError), annotated the return with the unimported ``Any`` and bound
    every local to one shadowed name while reading the undefined canonical
    names (``results``, ``output_md``, ...).
    """
    with open(input_json_file, encoding="utf-8") as f:
        results = json.load(f)

    output_md = ["<details>", "<summary>Show updated benchmarks!</summary>", " "]
    for benchmark_name in sorted(results):
        benchmark_res = results[benchmark_name]
        benchmark_file_name = benchmark_name.split("/")[-1]
        output_md.append(f"### Benchmark: {benchmark_file_name}")
        title = "| metric |"
        lines = "|--------|"
        value = "| new / old (diff) |"
        for metric_name in sorted(benchmark_res):
            metric_vals = benchmark_res[metric_name]
            new_val = metric_vals["new"]
            old_val = metric_vals.get("old", None)
            dif_val = metric_vals.get("diff", None)
            val_str = f" {new_val:f}" if isinstance(new_val, (int, float)) else "None"
            if old_val is not None:
                val_str += f" / {old_val:f}" if isinstance(old_val, (int, float)) else "None"
            if dif_val is not None:
                val_str += f" ({dif_val:f})" if isinstance(dif_val, (int, float)) else "None"
            title += " " + metric_name + " |"
            lines += "---|"
            value += val_str + " |"
        output_md += [title, lines, value, " "]

    output_md.append("</details>")
    with open(output_md_file, "w", encoding="utf-8") as f:
        f.writelines("\n".join(output_md))


# Name used by the CLI entry point below (the mangled def left it dangling).
format_json_to_md = __lowercase

if __name__ == "__main__":
    input_json_file = sys.argv[1]
    output_md_file = sys.argv[2]
    format_json_to_md(input_json_file, output_md_file)
| 193 | 0 |
"""simple docstring"""
from __future__ import annotations
from math import pi
def A ( snake_case :float , snake_case :float , snake_case :float ) -> dict[str, float]:
if (inductance, frequency, reactance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if inductance < 0:
raise ValueError('Inductance cannot be negative' )
if frequency < 0:
raise ValueError('Frequency cannot be negative' )
if reactance < 0:
raise ValueError('Inductive reactance cannot be negative' )
if inductance == 0:
return {"inductance": reactance / (2 * pi * frequency)}
elif frequency == 0:
return {"frequency": reactance / (2 * pi * inductance)}
elif reactance == 0:
return {"reactance": 2 * pi * frequency * inductance}
else:
raise ValueError('Exactly one argument must be 0' )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 316 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional, Union
from .generation.configuration_utils import GenerationConfig
from .training_args import TrainingArguments
from .utils import add_start_docstrings
UpperCamelCase : str = logging.getLogger(__name__)
@dataclass
@add_start_docstrings(TrainingArguments.__doc__ )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
    """Training arguments extended with generation-time options for seq2seq trainers.

    NOTE(review): the base class is the undefined name ``__SCREAMING_SNAKE_CASE``
    (given the import above, presumably ``TrainingArguments`` — confirm), and
    all five attributes below are bound to the single mangled name ``lowercase``
    (each rebinding shadows the previous) with the same undefined name as
    default, so none of the intended dataclass fields exist as written.
    """

    lowercase = field(default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use SortishSampler or not."} )
    lowercase = field(
        default=__SCREAMING_SNAKE_CASE , metadata={"help": "Whether to use generate to calculate generative metrics (ROUGE, BLEU)."} )
    lowercase = field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "The `max_length` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `max_length` value of the model configuration."
            )
        } , )
    lowercase = field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": (
                "The `num_beams` to use on each evaluation loop when `predict_with_generate=True`. Will default "
                "to the `num_beams` value of the model configuration."
            )
        } , )
    lowercase = field(
        default=__SCREAMING_SNAKE_CASE , metadata={
            "help": "Model id, file path or url pointing to a GenerationConfig json file, to use during prediction."
        } , )

    def UpperCAmelCase ( self ):
        """Serialize to a plain dict, presumably converting nested config values
        via their own ``to_dict`` — confirm against upstream.

        NOTE(review): the result of ``super().to_dict()`` is bound to a mangled
        throwaway name but iterated below as the undefined name ``d``, and the
        isinstance arguments are both the undefined ``__UpperCAmelCase`` — the
        method raises NameError as written.
        """
        __UpperCamelCase = super().to_dict()
        for k, v in d.items():
            if isinstance(__UpperCAmelCase , __UpperCAmelCase ):
                __UpperCamelCase = v.to_dict()
        return d
| 316 | 1 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Generic, TypeVar
# Element type shared by the node and stack classes below.
# Fix: the TypeVar was bound to a mangled throwaway name while the classes
# reference ``T``.
T = TypeVar("T")


class _A(Generic[T]):
    """A singly linked list node holding one stack element."""

    def __init__(self, data: T):
        # Fix: the original bound both attributes to a shadowed local instead
        # of ``self``, so ``data``/``next`` were never set.
        self.data = data
        self.next = None  # link to the node below in the stack

    def __str__(self):
        return f"{self.data}"


# The stack implementation below creates its nodes via the name ``Node``,
# which the mangled class name left dangling.
Node = _A
class _A(Generic[T]):
    """LIFO stack backed by a singly linked list of ``Node`` objects.

    Fixes(review): in the mangled original five methods shared one name (only
    the last survived) while the code itself calls ``self.is_empty()``, and
    every ``self.`` assignment was bound to a shadowed local; method names
    were restored from those call sites and each method's evident behaviour.
    ``Node`` is expected to be the node class defined above.
    """

    def __init__(self):
        # Topmost node, or None when the stack is empty.
        self.top = None

    def __iter__(self):
        """Yield the stored values from top to bottom."""
        node = self.top
        while node:
            yield node.data
            node = node.next

    def __str__(self):
        """Render as ``top->...->bottom``."""
        return "->".join([str(item) for item in self])

    def __len__(self):
        return len(tuple(iter(self)))

    def is_empty(self):
        """Return True when the stack holds no elements."""
        return self.top is None

    def push(self, item: T):
        """Place ``item`` on top of the stack."""
        node = Node(item)
        if not self.is_empty():
            node.next = self.top
        self.top = node

    def pop(self):
        """Remove and return the top value; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('pop from empty stack')
        assert isinstance(self.top, Node)
        pop_node = self.top
        self.top = self.top.next
        return pop_node.data

    def peek(self):
        """Return the top value without removing it; raise IndexError when empty."""
        if self.is_empty():
            raise IndexError('peek from empty stack')
        assert self.top is not None
        return self.top.data

    def clear(self):
        """Drop all elements."""
        self.top = None
if __name__ == "__main__":
    from doctest import testmod

    # Run any doctests defined in this module when executed directly.
    testmod()
| 353 |
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from transformers.modeling_outputs import BaseModelOutput
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_OBJECT_DETECTION_MAPPING
__snake_case :Tuple = logging.get_logger(__name__)
@add_end_docstrings(__UpperCAmelCase )
class _A ( __UpperCAmelCase ):
    """Zero-shot (open-vocabulary) object detection pipeline skeleton.

    NOTE(review): identifiers in this class are machine-mangled. The decorator
    argument and the base class are the undefined name ``__UpperCAmelCase``
    (given the imports above, presumably ``PIPELINE_INIT_ARGS`` and
    ``ChunkPipeline`` — confirm), several method bodies read names their
    mangled parameters no longer define, and the four pipeline-stage methods
    below all share the name ``_lowerCamelCase`` so only the last survives.
    """

    def __init__( self : Optional[int] , **__SCREAMING_SNAKE_CASE : Optional[Any]):
        """Forward kwargs to the base pipeline; reject the TF framework and
        require the vision backend."""
        super().__init__(**__SCREAMING_SNAKE_CASE)
        if self.framework == "tf":
            raise ValueError(F'The {self.__class__} is only available in PyTorch.')
        requires_backends(self , '''vision''')
        # NOTE(review): passes the constructor kwargs where upstream presumably
        # passes the zero-shot detection model mapping — confirm.
        self.check_model_type(__SCREAMING_SNAKE_CASE)

    # NOTE(review): the signature below repeats the name ``__SCREAMING_SNAKE_CASE``
    # for two positionals and the **kwargs — a SyntaxError — while the body
    # reads the undefined names ``kwargs``, ``image`` and ``candidate_labels``.
    def __call__( self : List[Any] , __SCREAMING_SNAKE_CASE : Union[str, "Image.Image", List[Dict[str, Any]]] , __SCREAMING_SNAKE_CASE : Union[str, List[str]] = None , **__SCREAMING_SNAKE_CASE : Union[str, Any] , ):
        """Accept a single image plus candidate labels, or a list of dicts with
        ``image``/``candidate_labels`` entries, and run the chunked pipeline."""
        if "text_queries" in kwargs:
            __a = kwargs.pop('''text_queries''')
        if isinstance(__SCREAMING_SNAKE_CASE , (str, Image.Image)):
            __a = {'''image''': image, '''candidate_labels''': candidate_labels}
        else:
            __a = image
        __a = super().__call__(__SCREAMING_SNAKE_CASE , **__SCREAMING_SNAKE_CASE)
        return results

    def _lowerCamelCase ( self : str , **__SCREAMING_SNAKE_CASE : Union[str, Any]):
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts;
        only ``threshold`` and ``top_k`` are routed, to postprocessing.

        NOTE(review): the extracted values are bound to a shadowed local, and
        the first dict built here is not the one returned.
        """
        __a = {}
        if "threshold" in kwargs:
            __a = kwargs['''threshold''']
        if "top_k" in kwargs:
            __a = kwargs['''top_k''']
        return {}, {}, postprocess_params

    def _lowerCamelCase ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple):
        """Preprocess: load the image, split comma-separated labels, and yield
        one tokenized-text + image-features chunk per candidate label.

        NOTE(review): every local is bound to the shadowed ``__a`` while the
        yield reads the undefined canonical names; ``torch.intaa`` is a mangled
        dtype (presumably ``torch.int32`` — confirm).
        """
        __a = load_image(inputs['''image'''])
        __a = inputs['''candidate_labels''']
        if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE):
            __a = candidate_labels.split(''',''')
        __a = torch.tensor([[image.height, image.width]] , dtype=torch.intaa)
        for i, candidate_label in enumerate(__SCREAMING_SNAKE_CASE):
            __a = self.tokenizer(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
            __a = self.image_processor(__SCREAMING_SNAKE_CASE , return_tensors=self.framework)
            yield {
                "is_last": i == len(__SCREAMING_SNAKE_CASE) - 1,
                "target_size": target_size,
                "candidate_label": candidate_label,
                **text_inputs,
                **image_features,
            }

    def _lowerCamelCase ( self : int , __SCREAMING_SNAKE_CASE : Tuple):
        """Forward: pop bookkeeping keys, run the model, and re-attach the
        bookkeeping to the model outputs."""
        __a = model_inputs.pop('''target_size''')
        __a = model_inputs.pop('''candidate_label''')
        __a = model_inputs.pop('''is_last''')
        __a = self.model(**__SCREAMING_SNAKE_CASE)
        __a = {'''target_size''': target_size, '''candidate_label''': candidate_label, '''is_last''': is_last, **outputs}
        return model_outputs

    def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : Optional[int] , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Union[str, Any]=None):
        """Postprocess: convert per-label detections above ``threshold`` into
        ``{score, label, box}`` dicts, sorted by descending score, optionally
        truncated to ``top_k``."""
        __a = []
        for model_output in model_outputs:
            __a = model_output['''candidate_label''']
            __a = BaseModelOutput(__SCREAMING_SNAKE_CASE)
            __a = self.image_processor.post_process_object_detection(
                outputs=__SCREAMING_SNAKE_CASE , threshold=__SCREAMING_SNAKE_CASE , target_sizes=model_output['''target_size'''])[0]
            for index in outputs["scores"].nonzero():
                __a = outputs['''scores'''][index].item()
                __a = self._get_bounding_box(outputs['''boxes'''][index][0])
                __a = {'''score''': score, '''label''': label, '''box''': box}
                results.append(__SCREAMING_SNAKE_CASE)
        __a = sorted(__SCREAMING_SNAKE_CASE , key=lambda __SCREAMING_SNAKE_CASE: x["score"] , reverse=__SCREAMING_SNAKE_CASE)
        if top_k:
            __a = results[:top_k]
        return results

    def _lowerCamelCase ( self : Optional[Any] , __SCREAMING_SNAKE_CASE : "torch.Tensor"):
        """Convert an ``(xmin, ymin, xmax, ymax)`` box tensor into a dict of ints
        (PyTorch only).

        NOTE(review): the unpacking below binds all four coordinates to the same
        shadowed name while the dict reads the undefined xmin/ymin/xmax/ymax.
        """
        if self.framework != "pt":
            raise ValueError('''The ZeroShotObjectDetectionPipeline is only available in PyTorch.''')
        __a , __a , __a , __a = box.int().tolist()
        __a = {
            '''xmin''': xmin,
            '''ymin''': ymin,
            '''xmax''': xmax,
            '''ymax''': ymax,
        }
        return bbox
| 131 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase__ = logging.get_logger(__name__)
# NOTE(review): the mapping below rebinds the same mangled name as the logger
# above, so the logger binding is lost; upstream these were presumably two
# distinct names (``logger`` and the pretrained-config archive map) — confirm.
lowerCAmelCase__ = {
    """vinvino02/glpn-kitti""": """https://huggingface.co/vinvino02/glpn-kitti/resolve/main/config.json""",
    # See all GLPN models at https://huggingface.co/models?filter=glpn
}
class a__ ( snake_case ):
"""simple docstring"""
__lowerCamelCase = 'glpn'
def __init__( self , lowercase=3 , lowercase=4 , lowercase=[2, 2, 2, 2] , lowercase=[8, 4, 2, 1] , lowercase=[32, 64, 160, 256] , lowercase=[7, 3, 3, 3] , lowercase=[4, 2, 2, 2] , lowercase=[1, 2, 5, 8] , lowercase=[4, 4, 4, 4] , lowercase="gelu" , lowercase=0.0 , lowercase=0.0 , lowercase=0.02 , lowercase=0.1 , lowercase=1e-6 , lowercase=64 , lowercase=10 , lowercase=-1 , **lowercase , ) -> Dict:
'''simple docstring'''
super().__init__(**lowercase )
A__ = num_channels
A__ = num_encoder_blocks
A__ = depths
A__ = sr_ratios
A__ = hidden_sizes
A__ = patch_sizes
A__ = strides
A__ = mlp_ratios
A__ = num_attention_heads
A__ = hidden_act
A__ = hidden_dropout_prob
A__ = attention_probs_dropout_prob
A__ = initializer_range
A__ = drop_path_rate
A__ = layer_norm_eps
A__ = decoder_hidden_size
A__ = max_depth
A__ = head_in_index
| 68 |
"""simple docstring"""
# Lint as: python3
import itertools
import os
import re
# Regexes used to convert between CamelCase and snake_case dataset names.
# Fix: the original bound all six constants to one shadowed mangled name,
# leaving every name referenced by the helpers below undefined.
_uppercase_uppercase_re = re.compile(r"([A-Z]+)([A-Z][a-z])")
_lowercase_uppercase_re = re.compile(r"([a-z\d])([A-Z])")
_single_underscore_re = re.compile(r"(?<!_)_(?!_)")
_multiple_underscores_re = re.compile(r"(_{2,})")
_split_re = r"^\w+(\.\w+)*$"
INVALID_WINDOWS_CHARACTERS_IN_PATH = r"<>:/\|?*"


def __magic_name__(lowercase):
    """Convert a CamelCase name to snake_case.

    Fix: the original discarded both substitution results into a shadowed
    local and then lowercased the undefined name ``name``.
    """
    lowercase = _uppercase_uppercase_re.sub(r"\1_\2", lowercase)
    lowercase = _lowercase_uppercase_re.sub(r"\1_\2", lowercase)
    return lowercase.lower()


# Conventional name; the other helpers in this file call it by this name.
camelcase_to_snakecase = __magic_name__
def __magic_name__(lowercase):
    """Convert a snake_case name to CamelCase.

    Single underscores become word boundaries and are dropped; runs of two or
    more underscores are captured by the split and therefore preserved.

    Fix: the original discarded both split results into a shadowed local and
    then flattened the unsplit input.
    """
    parts = _single_underscore_re.split(lowercase)
    parts = [_multiple_underscores_re.split(n) for n in parts]
    return "".join(n.capitalize() for n in itertools.chain.from_iterable(parts) if n != "")


# Conventional public name for this helper.
snakecase_to_camelcase = __magic_name__
def __magic_name__(lowercase):
    """Return the snake_case filename prefix for a dataset name.

    Raises ValueError when the argument is a path rather than a bare name.

    Fix: the original compared against and formatted the undefined name
    ``name`` while the parameter was mangled to ``lowercase``.
    """
    if os.path.basename(lowercase) != lowercase:
        raise ValueError(f'''Should be a dataset name, not a path: {lowercase}''')
    return camelcase_to_snakecase(lowercase)


# Conventional name used by the split helpers below.
filename_prefix_for_name = __magic_name__
def __magic_name__(name, split):
    """Return the filename prefix ``<snake_name>-<split>`` after validating
    that ``name`` is not a path and ``split`` matches ``_split_re``.

    Fix: the original declared both parameters under the same mangled name
    (SyntaxError); the body already used ``name`` and ``split``.
    """
    if os.path.basename(name) != name:
        raise ValueError(f'''Should be a dataset name, not a path: {name}''')
    if not re.match(_split_re, split):
        raise ValueError(f'''Split name should match \'{_split_re}\'\' but got \'{split}\'.''')
    return f'''{filename_prefix_for_name(name)}-{split}'''


# Conventional name used by the pattern/filename helpers below.
filename_prefix_for_split = __magic_name__
def __magic_name__(dataset_name, split, data_dir, filetype_suffix=None):
    """Return a glob pattern (``<data_dir>/<prefix>[.<suffix>]*``) matching all
    shard files of one dataset split.

    Fix: the original declared four parameters under one mangled name
    (SyntaxError) and read shadowed/undefined locals; parameter names and
    order restored from the evident call structure — confirm against the
    upstream naming module.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    if filetype_suffix:
        prefix += f'''.{filetype_suffix}'''
    filepath = os.path.join(data_dir, prefix)
    return f'''{filepath}*'''


# Conventional public name for this helper.
filepattern_for_dataset_split = __magic_name__
def __magic_name__(path, dataset_name, split, filetype_suffix=None, shard_lengths=None):
    """Return the concrete filenames of a dataset split: one
    ``<prefix>-NNNNN-of-MMMMM`` name per shard when ``shard_lengths`` is
    given, otherwise a single-element list with the bare prefix; an optional
    ``filetype_suffix`` is appended to each name.

    Fix: the original declared its positional parameters under one mangled
    name (SyntaxError) and bound every intermediate to a shadowed local.
    """
    prefix = filename_prefix_for_split(dataset_name, split)
    prefix = os.path.join(path, prefix)
    if shard_lengths:
        num_shards = len(shard_lengths)
        filenames = [f'''{prefix}-{shard_id:05d}-of-{num_shards:05d}''' for shard_id in range(num_shards)]
        if filetype_suffix:
            filenames = [filename + f'''.{filetype_suffix}''' for filename in filenames]
        return filenames
    else:
        filename = prefix
        if filetype_suffix:
            filename += f'''.{filetype_suffix}'''
        return [filename]


# Conventional public name for this helper.
filenames_for_dataset_split = __magic_name__
| 173 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __magic_name__ :
    """Builds tiny RoFormer configs/random inputs and runs one shape-check per
    TF model head.

    NOTE(review): identifiers in this class are machine-mangled. Every
    ``__init__`` parameter shares the name ``snake_case`` (a SyntaxError as
    written) and is ignored — the body hard-codes each hyperparameter while
    binding it to the shadowed local ``_UpperCAmelCase`` instead of ``self``;
    the per-head methods read the undefined name ``__SCREAMING_SNAKE_CASE``
    where the mangled parameters should be. Comments below document intent
    only; the code is preserved byte-for-byte.
    """

    def __init__( self , snake_case , snake_case=1_3 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=9_9 , snake_case=3_2 , snake_case=2 , snake_case=4 , snake_case=3_7 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=5_1_2 , snake_case=1_6 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> List[Any]:
        """Store the (hard-coded) tiny-model hyperparameters."""
        _UpperCAmelCase : Any =parent
        _UpperCAmelCase : List[str] =1_3
        _UpperCAmelCase : Optional[Any] =7
        _UpperCAmelCase : str =True
        _UpperCAmelCase : Dict =True
        _UpperCAmelCase : Optional[int] =True
        _UpperCAmelCase : Any =True
        _UpperCAmelCase : Optional[int] =9_9
        _UpperCAmelCase : Dict =3_2
        _UpperCAmelCase : List[Any] =2
        _UpperCAmelCase : Dict =4
        _UpperCAmelCase : Any =3_7
        _UpperCAmelCase : Dict ='gelu'
        _UpperCAmelCase : str =0.1
        _UpperCAmelCase : List[Any] =0.1
        _UpperCAmelCase : Optional[int] =5_1_2
        _UpperCAmelCase : List[Any] =1_6
        _UpperCAmelCase : Optional[int] =2
        _UpperCAmelCase : int =0.02
        _UpperCAmelCase : Optional[int] =3
        _UpperCAmelCase : str =4
        _UpperCAmelCase : List[Any] =None

    def lowerCAmelCase ( self) -> str:
        """Build random input ids/masks/labels and a small RoFormerConfig."""
        _UpperCAmelCase : Dict =ids_tensor([self.batch_size, self.seq_length] , self.vocab_size)
        _UpperCAmelCase : Union[str, Any] =None
        if self.use_input_mask:
            _UpperCAmelCase : Dict =random_attention_mask([self.batch_size, self.seq_length])
        _UpperCAmelCase : List[Any] =None
        if self.use_token_type_ids:
            _UpperCAmelCase : Union[str, Any] =ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size)
        _UpperCAmelCase : Union[str, Any] =None
        _UpperCAmelCase : Optional[int] =None
        _UpperCAmelCase : Tuple =None
        if self.use_labels:
            _UpperCAmelCase : Any =ids_tensor([self.batch_size] , self.type_sequence_label_size)
            _UpperCAmelCase : Optional[Any] =ids_tensor([self.batch_size, self.seq_length] , self.num_labels)
            _UpperCAmelCase : Any =ids_tensor([self.batch_size] , self.num_choices)
        _UpperCAmelCase : Dict =RoFormerConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=__SCREAMING_SNAKE_CASE , )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> List[str]:
        """Check base-model output shape for dict and list-style inputs."""
        _UpperCAmelCase : str =TFRoFormerModel(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : List[Any] ={'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        _UpperCAmelCase : Any =[input_ids, input_mask]
        _UpperCAmelCase : Any =model(__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : Optional[Any] =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size))

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Tuple:
        """Check causal-LM logits shape."""
        _UpperCAmelCase : int =True
        _UpperCAmelCase : Optional[Any] =TFRoFormerForCausalLM(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : List[Any] ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _UpperCAmelCase : Dict =model(__SCREAMING_SNAKE_CASE)['logits']
        self.parent.assertListEqual(
            list(prediction_scores.numpy().shape) , [self.batch_size, self.seq_length, self.vocab_size])

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> List[str]:
        """Check masked-LM logits shape."""
        _UpperCAmelCase : Optional[Any] =TFRoFormerForMaskedLM(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : int ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _UpperCAmelCase : Dict =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size))

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> int:
        """Check sequence-classification logits shape."""
        _UpperCAmelCase : Optional[Any] =self.num_labels
        _UpperCAmelCase : Union[str, Any] =TFRoFormerForSequenceClassification(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : int ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _UpperCAmelCase : Union[str, Any] =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels))

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Optional[Any]:
        """Check multiple-choice logits shape (inputs tiled over num_choices)."""
        _UpperCAmelCase : Optional[int] =self.num_choices
        _UpperCAmelCase : Dict =TFRoFormerForMultipleChoice(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : Optional[int] =tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1))
        _UpperCAmelCase : int =tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1))
        _UpperCAmelCase : Optional[int] =tf.tile(tf.expand_dims(__SCREAMING_SNAKE_CASE , 1) , (1, self.num_choices, 1))
        _UpperCAmelCase : List[Any] ={
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        _UpperCAmelCase : Optional[int] =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices))

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> Union[str, Any]:
        """Check token-classification logits shape."""
        _UpperCAmelCase : Optional[int] =self.num_labels
        _UpperCAmelCase : Optional[Any] =TFRoFormerForTokenClassification(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : str ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _UpperCAmelCase : List[Any] =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels))

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case) -> int:
        """Check question-answering start/end logits shapes."""
        _UpperCAmelCase : Optional[Any] =TFRoFormerForQuestionAnswering(config=__SCREAMING_SNAKE_CASE)
        _UpperCAmelCase : Optional[Any] ={
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        _UpperCAmelCase : Optional[Any] =model(__SCREAMING_SNAKE_CASE)
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length))

    def lowerCAmelCase ( self) -> List[str]:
        """Repackage prepare_config_and_inputs() output as (config, inputs_dict)."""
        _UpperCAmelCase : List[Any] =self.prepare_config_and_inputs()
        (
            (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) , (
                _UpperCAmelCase
            ) ,
        ) : Optional[int] =config_and_inputs
        _UpperCAmelCase : Any ={'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class __magic_name__ ( _A ,_A ,unittest.TestCase ):
    """Standard TF model-test suite driving the tester class above.

    NOTE(review): the two mixin bases are the undefined mangled name ``_A``
    (given the imports above, presumably TFModelTesterMixin and
    PipelineTesterMixin — confirm); the setUp method references
    ``TFRoFormerModelTester``, which is not the (mangled) name the tester
    class above is actually defined under.
    """

    UpperCAmelCase =(
        (
            TFRoFormerModel,
            TFRoFormerForCausalLM,
            TFRoFormerForMaskedLM,
            TFRoFormerForQuestionAnswering,
            TFRoFormerForSequenceClassification,
            TFRoFormerForTokenClassification,
            TFRoFormerForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    UpperCAmelCase =(
        {
            "feature-extraction": TFRoFormerModel,
            "fill-mask": TFRoFormerForMaskedLM,
            "question-answering": TFRoFormerForQuestionAnswering,
            "text-classification": TFRoFormerForSequenceClassification,
            "text-generation": TFRoFormerForCausalLM,
            "token-classification": TFRoFormerForTokenClassification,
            "zero-shot": TFRoFormerForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    # NOTE(review): all class attributes above/below share the mangled name
    # ``UpperCAmelCase``, so only the last binding survives at runtime.
    UpperCAmelCase =False
    UpperCAmelCase =False

    def lowerCAmelCase ( self , snake_case , snake_case , snake_case , snake_case , snake_case) -> Dict:
        """Skip the text-generation pipeline tests for this model."""
        if pipeline_test_casse_name == "TextGenerationPipelineTests":
            return True
        return False

    def lowerCAmelCase ( self) -> Dict:
        """Create the model tester and a ConfigTester for RoFormerConfig."""
        _UpperCAmelCase : str =TFRoFormerModelTester(self)
        _UpperCAmelCase : Union[str, Any] =ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , hidden_size=3_7)

    def lowerCAmelCase ( self) -> List[str]:
        """Run the shared config sanity checks."""
        self.config_tester.run_common_tests()

    def lowerCAmelCase ( self) -> Union[str, Any]:
        _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> List[str]:
        _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> int:
        _UpperCAmelCase : Dict =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> Optional[Any]:
        _UpperCAmelCase : Any =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> Optional[int]:
        _UpperCAmelCase : Optional[Any] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> Any:
        _UpperCAmelCase : List[str] =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*__SCREAMING_SNAKE_CASE)

    def lowerCAmelCase ( self) -> List[str]:
        _UpperCAmelCase : Tuple =self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*__SCREAMING_SNAKE_CASE)

    @slow
    def lowerCAmelCase ( self) -> List[str]:
        """Smoke-test loading a pretrained checkpoint from the Hub."""
        _UpperCAmelCase : Optional[Any] =TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base')
        self.assertIsNotNone(__SCREAMING_SNAKE_CASE)
@require_tf
class __magic_name__ ( unittest.TestCase ):
    """Integration test: masked-LM logits of the pretrained Chinese RoFormer.

    NOTE(review): every local below is bound to the shadowed mangled name
    ``_UpperCAmelCase`` while the assertions read the undefined canonical
    names (``model``, ``output``, ``vocab_size``, ...), and
    ``__SCREAMING_SNAKE_CASE`` is undefined; the code is preserved as-is.
    """

    @slow
    def lowerCAmelCase ( self) -> int:
        _UpperCAmelCase : Union[str, Any] =TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base')
        _UpperCAmelCase : Optional[int] =tf.constant([[0, 1, 2, 3, 4, 5]])
        _UpperCAmelCase : int =model(__SCREAMING_SNAKE_CASE)[0]
        # TODO Replace vocab size
        _UpperCAmelCase : List[str] =5_0_0_0_0
        _UpperCAmelCase : List[Any] =[1, 6, vocab_size]
        self.assertEqual(output.shape , __SCREAMING_SNAKE_CASE)
        print(output[:, :3, :3])
        # TODO Replace values below with what was printed above.
        _UpperCAmelCase : List[Any] =tf.constant(
            [
                [
                    [-0.12_05_33_41, -1.0_26_49_01, 0.29_22_19_46],
                    [-1.5_13_37_83, 0.19_74_33, 0.15_19_06_07],
                    [-5.0_13_54_03, -3.90_02_56, -0.84_03_87_64],
                ]
            ])
        tf.debugging.assert_near(output[:, :3, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4)
@require_tf
class __magic_name__ ( unittest.TestCase ):
    """Numeric checks of TFRoFormerSinusoidalPositionalEmbedding values.

    NOTE(review): locals are bound to shadowed mangled names while the
    assertions pass the undefined ``__SCREAMING_SNAKE_CASE``; the intended
    comparisons (embedding output vs. the hard-coded expected tensors) do not
    execute as written. Code preserved byte-for-byte.
    """

    # Absolute tolerance for the tensor comparisons below.
    UpperCAmelCase =1e-4

    def lowerCAmelCase ( self) -> int:
        """Embedding of a (1, 2) input must match the first two sinusoid rows."""
        _UpperCAmelCase : Optional[Any] =tf.constant([[4, 1_0]])
        _UpperCAmelCase : Union[str, Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6)
        _UpperCAmelCase : List[str] =emba(input_ids.shape)
        _UpperCAmelCase : List[Any] =tf.constant(
            [[0.00_00, 0.00_00, 0.00_00, 1.00_00, 1.00_00, 1.00_00], [0.84_15, 0.04_64, 0.00_22, 0.54_03, 0.99_89, 1.00_00]])
        tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=self.tolerance)

    def lowerCAmelCase ( self) -> int:
        """First rows/columns of a large embedding table must match known values."""
        _UpperCAmelCase : Optional[Any] =tf.constant(
            [
                [0.00_00, 0.00_00, 0.00_00, 0.00_00, 0.00_00],
                [0.84_15, 0.82_19, 0.80_20, 0.78_19, 0.76_17],
                [0.90_93, 0.93_64, 0.95_81, 0.97_49, 0.98_70],
            ])
        _UpperCAmelCase : Union[str, Any] =TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_1_2 , embedding_dim=5_1_2)
        emba([2, 1_6, 5_1_2])
        _UpperCAmelCase : List[Any] =emba.weight[:3, :5]
        tf.debugging.assert_near(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , atol=self.tolerance)
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    """Checks ``apply_rotary_position_embeddings`` against recorded values.

    Fixes: class-name collision, undefined locals, mangled ``tf.floataa``
    (→ ``tf.float32``), and argument names collapsed by obfuscation.
    """

    tolerance = 1e-4

    def test_apply_rotary_position_embeddings(self) -> None:
        # 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64, dtype=tf.float32), shape=(2, 12, 16, 64)) / 100
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32, embedding_dim=64)
        sinusoidal_pos = embed_positions([2, 16, 768])[None, None, :, :]
        query_layer, key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos, query_layer, key_layer
        )
        desired_query_layer = tf.constant(
            [
                [0.00_00, 0.01_00, 0.02_00, 0.03_00, 0.04_00, 0.05_00, 0.06_00, 0.07_00],
                [-0.20_12, 0.88_97, 0.02_63, 0.94_01, 0.20_74, 0.94_63, 0.34_81, 0.93_43],
                [-1.70_57, 0.62_71, -1.21_45, 1.38_97, -0.63_03, 1.76_47, -0.11_73, 1.89_85],
                [-2.17_31, -1.63_97, -2.73_58, 0.28_54, -2.18_40, 1.71_83, -1.30_18, 2.48_71],
                [0.27_17, -3.61_73, -2.92_06, -2.19_88, -3.66_38, 0.38_58, -2.91_55, 2.29_80],
                [3.98_59, -2.15_80, -0.79_84, -4.49_04, -4.11_81, -2.02_52, -4.47_82, 1.12_53],
            ]
        )
        desired_key_layer = tf.constant(
            [
                [0.00_00, -0.01_00, -0.02_00, -0.03_00, -0.04_00, -0.05_00, -0.06_00, -0.07_00],
                [0.20_12, -0.88_97, -0.02_63, -0.94_01, -0.20_74, -0.94_63, -0.34_81, -0.93_43],
                [1.70_57, -0.62_71, 1.21_45, -1.38_97, 0.63_03, -1.76_47, 0.11_73, -1.89_85],
                [2.17_31, 1.63_97, 2.73_58, -0.28_54, 2.18_40, -1.71_83, 1.30_18, -2.48_71],
                [-0.27_17, 3.61_73, 2.92_06, 2.19_88, 3.66_38, -0.38_58, 2.91_55, -2.29_80],
                [-3.98_59, 2.15_80, 0.79_84, 4.49_04, 4.11_81, 2.02_52, 4.47_82, -1.12_53],
            ]
        )
        tf.debugging.assert_near(query_layer[0, 0, :6, :8], desired_query_layer, atol=self.tolerance)
        tf.debugging.assert_near(key_layer[0, 0, :6, :8], desired_key_layer, atol=self.tolerance)
| 363 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RobertaPreLayerNormConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roberta_prelayernorm.modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
)
class FlaxRobertaPreLayerNormModelTester(unittest.TestCase):
    """Builds small RobertaPreLayerNorm configs and dummy inputs for the tests below.

    Fixes: the obfuscated ``__init__`` declared every parameter as ``snake_case``
    (duplicate argument names are a SyntaxError) and every ``self.x = x``
    assignment was collapsed to a local; the three methods all shared one name.
    The class name is grounded by the reference in ``setUp`` below (L15158).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_attention_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_choices=4,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_choices = num_choices

    def prepare_config_and_inputs(self):
        """Return (config, input_ids, token_type_ids, attention_mask)."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        config = RobertaPreLayerNormConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,  # NOTE(review): obfuscated source passed an undefined name here; False matches the encoder tests
            initializer_range=self.initializer_range,
        )
        return config, input_ids, token_type_ids, attention_mask

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape FlaxModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
        return config, inputs_dict

    def prepare_config_and_inputs_for_decoder(self):
        """Decoder variant: adds encoder hidden states and encoder attention mask."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, token_type_ids, attention_mask = config_and_inputs
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size])
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        return (
            config,
            input_ids,
            token_type_ids,
            encoder_hidden_states,
            encoder_attention_mask,
        )
@require_flax
# Copied from tests.models.roberta.test_modelling_flax_roberta.FlaxRobertaPreLayerNormModelTest with ROBERTA->ROBERTA_PRELAYERNORM,Roberta->RobertaPreLayerNorm,roberta-base->andreasmadsen/efficient_mlm_m0.40
class FlaxRobertaPreLayerNormModelTest(FlaxModelTesterMixin, unittest.TestCase):
    """Common Flax model-tester suite for RobertaPreLayerNorm.

    Fixes: the obfuscated base class ``lowerCAmelCase`` was undefined (the
    imported ``FlaxModelTesterMixin`` is the grounded base), the class
    attributes had unusable names, and the mixin requires ``setUp`` to set
    ``self.model_tester``.
    """

    test_head_masking = True
    all_model_classes = (
        (
            FlaxRobertaPreLayerNormModel,
            FlaxRobertaPreLayerNormForCausalLM,
            FlaxRobertaPreLayerNormForMaskedLM,
            FlaxRobertaPreLayerNormForSequenceClassification,
            FlaxRobertaPreLayerNormForTokenClassification,
            FlaxRobertaPreLayerNormForMultipleChoice,
            FlaxRobertaPreLayerNormForQuestionAnswering,
        )
        if is_flax_available()
        else ()
    )

    def setUp(self) -> None:
        self.model_tester = FlaxRobertaPreLayerNormModelTester(self)

    @slow
    def test_model_from_pretrained(self) -> None:
        """Each model class must load the reference checkpoint and run a forward pass."""
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
@require_flax
class TFRobertaPreLayerNormModelIntegrationTest(unittest.TestCase):
    """Numeric integration tests against the reference checkpoint.

    Fixes: undefined locals, mangled dtypes (``jnp.intaa``/``np.floataa`` →
    ``jnp.int32``/``np.float32``) and ``from_pt`` receiving an undefined name.
    """

    @slow
    def test_inference_masked_lm(self) -> None:
        model = FlaxRobertaPreLayerNormForMaskedLM.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        expected_shape = [1, 11, 50265]
        self.assertEqual(list(output.shape), expected_shape)
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[40.48_80, 18.01_99, -5.23_67], [-1.88_77, -4.08_85, 10.70_85], [-2.26_13, -5.61_10, 7.26_65]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))

    @slow
    def test_inference_no_head(self) -> None:
        model = FlaxRobertaPreLayerNormModel.from_pretrained('andreasmadsen/efficient_mlm_m0.40', from_pt=True)
        input_ids = np.array([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]], dtype=jnp.int32)
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = np.array(
            [[[0.02_08, -0.03_56, 0.02_37], [-0.15_69, -0.04_11, -0.26_26], [0.18_79, 0.01_25, -0.00_89]]],
            dtype=np.float32,
        )
        self.assertTrue(np.allclose(output[:, :3, :3], expected_slice, atol=1e-4))
| 242 | 0 |
"""simple docstring"""
def is_pentagonal(n: int) -> bool:
    """Return True if ``n`` is a pentagonal number P_k = k(3k - 1)/2 for some k >= 1.

    Fixes: the function was named ``__UpperCAmelCase`` while its caller below
    invokes ``is_pentagonal``; the float ``** 0.5`` test is replaced by
    ``math.isqrt`` so the check stays exact for large ``n`` (and non-positive
    ``n`` no longer produces a complex root).
    """
    if n < 1:
        return False
    disc = 1 + 24 * n
    root = isqrt(disc)
    # n is pentagonal iff 1 + 24n is a perfect square and (1 + sqrt) is divisible by 6
    return root * root == disc and (1 + root) % 6 == 0
def solution(limit: int = 5000) -> int:
    """Project Euler 44: find pentagonal numbers P_j, P_i whose sum and
    difference are both pentagonal, and return the (first found) difference.

    Searches the first ``limit - 1`` pentagonal numbers; returns -1 if no pair
    is found within that range.

    Fixes: this function shared the name ``__UpperCAmelCase`` with
    ``is_pentagonal`` above (the second definition shadowed the first), yet the
    ``__main__`` guard calls ``solution()``.
    """
    pentagonal_nums = [(i * (3 * i - 1)) // 2 for i in range(1, limit)]
    for i, pentagonal_i in enumerate(pentagonal_nums):
        for j in range(i, len(pentagonal_nums)):
            pentagonal_j = pentagonal_nums[j]
            a = pentagonal_i + pentagonal_j
            b = pentagonal_j - pentagonal_i
            if is_pentagonal(a) and is_pentagonal(b):
                return b
    return -1
if __name__ == "__main__":
    # Print the minimal pentagonal difference for Project Euler problem 44.
    print(f'''{solution() = }''')
| 172 | """simple docstring"""
import io
import itertools
import json
from dataclasses import dataclass
from typing import Optional
import pyarrow as pa
import pyarrow.json as paj
import datasets
from datasets.table import table_cast
from datasets.utils.file_utils import readline
_a : int= datasets.utils.logging.get_logger(__name__)
@dataclass
class JsonConfig(datasets.BuilderConfig):
    """BuilderConfig for JSON loading.

    Fixes: the class was named ``UpperCamelCase`` (colliding with the builder
    below) and the field names were mangled, although the builder reads them as
    ``self.config.features``, ``.encoding``, ``.field`` etc.
    """

    features: Optional[datasets.Features] = None
    encoding: str = "utf-8"
    encoding_errors: Optional[str] = None
    field: Optional[str] = None
    use_threads: bool = True  # deprecated
    block_size: Optional[int] = None  # deprecated
    chunksize: int = 10 << 20  # 10MB
    newlines_in_values: Optional[bool] = None
class Json(datasets.ArrowBasedBuilder):
    """Arrow-based builder for JSON / JSON Lines files.

    Fixes: all four methods were named ``_lowercase`` (each definition shadowed
    the previous one), the required ``BUILDER_CONFIG_CLASS`` attribute had an
    unusable name, and several locals/parameters were conflated by obfuscation
    (e.g. ``dl_manager`` used where ``data_files``/``file`` was meant).
    """

    BUILDER_CONFIG_CLASS = JsonConfig

    def _info(self):
        """Validate deprecated config knobs and return dataset metadata."""
        if self.config.block_size is not None:
            logger.warning('The JSON loader parameter `block_size` is deprecated. Please use `chunksize` instead')
            self.config.chunksize = self.config.block_size
        if self.config.use_threads is not True:
            logger.warning(
                'The JSON loader parameter `use_threads` is deprecated and doesn\'t have any effect anymore.')
        if self.config.newlines_in_values is not None:
            raise ValueError('The JSON loader parameter `newlines_in_values` is no longer supported')
        return datasets.DatasetInfo(features=self.config.features)

    def _split_generators(self, dl_manager):
        """Download/resolve data files and emit one SplitGenerator per split."""
        if not self.config.data_files:
            raise ValueError(f"At least one data file must be specified, but got data_files={self.config.data_files}")
        data_files = dl_manager.download_and_extract(self.config.data_files)
        if isinstance(data_files, (str, list, tuple)):
            # Plain file list: everything goes into the train split.
            files = data_files
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            return [datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={'files': files})]
        splits = []
        for split_name, files in data_files.items():
            if isinstance(files, str):
                files = [files]
            files = [dl_manager.iter_files(file) for file in files]
            splits.append(datasets.SplitGenerator(name=split_name, gen_kwargs={'files': files}))
        return splits

    def _cast_table(self, pa_table: pa.Table) -> pa.Table:
        """Cast a raw Arrow table to the configured features, adding missing columns."""
        if self.config.features is not None:
            # adding missing columns
            for column_name in set(self.config.features) - set(pa_table.column_names):
                type = self.config.features.arrow_schema.field(column_name).type
                pa_table = pa_table.append_column(column_name, pa.array([None] * len(pa_table), type=type))
            # more expensive cast to support nested structures with keys in a different order
            # allows str <-> int/float or str to Audio for example
            pa_table = table_cast(pa_table, self.config.features.arrow_schema)
        return pa_table

    def _generate_tables(self, files):
        """Yield (key, Arrow table) pairs, streaming JSON-lines in chunks."""
        for file_idx, file in enumerate(itertools.chain.from_iterable(files)):
            # If the file is one json object and if we need to look at the list of items in one specific field
            if self.config.field is not None:
                with open(file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                    dataset = json.load(f)
                # We keep only the field we are interested in
                dataset = dataset[self.config.field]
                # We accept two format: a list of dicts or a dict of lists
                if isinstance(dataset, (list, tuple)):
                    keys = set().union(*[row.keys() for row in dataset])
                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                else:
                    mapping = dataset
                pa_table = pa.Table.from_pydict(mapping)
                yield file_idx, self._cast_table(pa_table)
            # If the file has one json object per line
            else:
                with open(file, 'rb') as f:
                    batch_idx = 0
                    # Use block_size equal to the chunk size divided by 32 to leverage multithreading
                    # Set a default minimum value of 16kB if the chunk size is really small
                    block_size = max(self.config.chunksize // 32, 16 << 10)
                    encoding_errors = (
                        self.config.encoding_errors if self.config.encoding_errors is not None else 'strict'
                    )
                    while True:
                        batch = f.read(self.config.chunksize)
                        if not batch:
                            break
                        # Finish current line
                        try:
                            batch += f.readline()
                        except (AttributeError, io.UnsupportedOperation):
                            batch += readline(f)
                        # PyArrow only accepts utf-8 encoded bytes
                        if self.config.encoding != "utf-8":
                            batch = batch.decode(self.config.encoding, errors=encoding_errors).encode('utf-8')
                        try:
                            while True:
                                try:
                                    pa_table = paj.read_json(
                                        io.BytesIO(batch), read_options=paj.ReadOptions(block_size=block_size))
                                    break
                                except (pa.ArrowInvalid, pa.ArrowNotImplementedError) as e:
                                    if (
                                        isinstance(e, pa.ArrowInvalid)
                                        and "straddling" not in str(e)
                                        or block_size > len(batch)
                                    ):
                                        raise
                                    else:
                                        # Increase the block size in case it was too small.
                                        # The block size will be reset for the next file.
                                        logger.debug(
                                            f"Batch of {len(batch)} bytes couldn't be parsed with block_size={block_size}. Retrying with block_size={block_size * 2}.")
                                        block_size *= 2
                        except pa.ArrowInvalid as e:
                            try:
                                with open(
                                    file, encoding=self.config.encoding, errors=self.config.encoding_errors) as f:
                                    dataset = json.load(f)
                            except json.JSONDecodeError:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise e
                            # If possible, parse the file as a list of json objects and exit the loop
                            if isinstance(dataset, list):  # list is the only sequence type supported in JSON
                                try:
                                    keys = set().union(*[row.keys() for row in dataset])
                                    mapping = {col: [row.get(col) for row in dataset] for col in keys}
                                    pa_table = pa.Table.from_pydict(mapping)
                                except (pa.ArrowInvalid, AttributeError) as e:
                                    logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                    raise ValueError(f"Not able to read records in the JSON file at {file}.") from None
                                yield file_idx, self._cast_table(pa_table)
                                break
                            else:
                                logger.error(f"Failed to read file '{file}' with error {type(e)}: {e}")
                                raise ValueError(
                                    f"Not able to read records in the JSON file at {file}. "
                                    f"You should probably indicate the field of the JSON file containing your records. "
                                    f"This JSON file contain the following fields: {str(list(dataset.keys()))}. "
                                    f"Select the correct one and provide it as `field='XXX'` to the dataset loading method. ") from None
                        # Uncomment for debugging (will print the Arrow table size and elements)
                        # logger.warning(f"pa_table: {pa_table} num rows: {pa_table.num_rows}")
                        # logger.warning('\n'.join(str(pa_table.slice(i, 1).to_pydict()) for i in range(pa_table.num_rows)))
                        yield (file_idx, batch_idx), self._cast_table(pa_table)
                        batch_idx += 1
| 172 | 1 |
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False

# Download the sentence tokenizer once; FileLock serializes concurrent workers.
# Fixes: the flag was assigned to an obfuscated name while the code below reads
# ``NLTK_AVAILABLE``.
if NLTK_AVAILABLE:
    with FileLock(".lock") as lock:
        nltk.download("punkt", quiet=True)
def _UpperCamelCase(UpperCamelCase_: str) -> str:
    """Split ``UpperCamelCase_`` into sentences, one per line (Pegasus-style).

    Fixes: the body referenced an undefined ``_A`` instead of the parameter,
    and the ``re.sub`` result was discarded.
    """
    text = re.sub('<n>', '', UpperCamelCase_)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(text))
| 353 |
from collections.abc import Iterator, MutableMapping
from dataclasses import dataclass
from typing import Generic, TypeVar
# Generic key/value type variables for the hash map. Fixes: both were bound to
# the same obfuscated name although ``_Item`` below annotates ``KEY``/``VAL``.
KEY = TypeVar("KEY")
VAL = TypeVar("VAL")
@dataclass(frozen=True, slots=True)
class _Item(Generic[KEY, VAL]):
    """Immutable key/value pair stored in a hash-map bucket.

    Fixes: the decorator arguments referenced an undefined ``__lowercase``
    (→ ``True``) and the class name collided with the other classes in this
    module although it is referenced as ``_Item`` below.
    """

    key: KEY
    val: VAL
class _DeletedItem(_Item):
    """Falsy sentinel marking a bucket whose item was deleted.

    Staying falsy lets ``_try_set`` reuse the slot while keeping probe chains
    intact. Fixes: ``super().__init__`` was called with an undefined name
    instead of ``None, None``.
    """

    def __init__(self) -> None:
        super().__init__(None, None)

    def __bool__(self) -> bool:
        return False
__snake_case : int = _DeletedItem()
class HashMap(MutableMapping[KEY, VAL]):
    """Open-addressing hash map with linear probing and automatic resizing.

    Fixes: every private method was named ``UpperCamelCase__`` (so later
    definitions shadowed earlier ones, while the bodies call ``self._try_set``
    etc.), and every mutation of ``self._buckets``/``self._len`` had been
    collapsed to a throwaway local, so the map never actually stored anything.
    """

    def __init__(self, initial_block_size: int = 8, capacity_factor: float = 0.75) -> None:
        self._initial_block_size = initial_block_size
        self._buckets: list[_Item | None] = [None] * initial_block_size
        assert 0.0 < capacity_factor < 1.0
        self._capacity_factor = capacity_factor
        self._len = 0

    def _get_bucket_index(self, key: KEY) -> int:
        # Home slot of the key; probing starts here.
        return hash(key) % len(self._buckets)

    def _get_next_ind(self, ind: int) -> int:
        # Linear probing with wrap-around.
        return (ind + 1) % len(self._buckets)

    def _try_set(self, ind: int, key: KEY, val: VAL) -> bool:
        """Try to place (key, val) at bucket ``ind``; True on success."""
        stored = self._buckets[ind]
        if not stored:
            # Empty slot or the falsy _deleted sentinel: claim it.
            self._buckets[ind] = _Item(key, val)
            self._len += 1
            return True
        elif stored.key == key:
            # Same key: overwrite in place without changing the length.
            self._buckets[ind] = _Item(key, val)
            return True
        else:
            return False

    def _is_full(self) -> bool:
        limit = len(self._buckets) * self._capacity_factor
        return len(self) >= int(limit)

    def _is_sparse(self) -> bool:
        if len(self._buckets) <= self._initial_block_size:
            return False
        limit = len(self._buckets) * self._capacity_factor / 2
        return len(self) < limit

    def _resize(self, new_size: int) -> None:
        """Rebuild the bucket array at ``new_size`` and re-insert every item."""
        old_buckets = self._buckets
        self._buckets = [None] * new_size
        self._len = 0
        for item in old_buckets:
            if item:
                self._add_item(item.key, item.val)

    def _size_up(self) -> None:
        self._resize(len(self._buckets) * 2)

    def _size_down(self) -> None:
        self._resize(len(self._buckets) // 2)

    def _iterate_buckets(self, key: KEY):
        """Yield bucket indices along the probe chain of ``key`` (one full cycle)."""
        ind = self._get_bucket_index(key)
        for _ in range(len(self._buckets)):
            yield ind
            ind = self._get_next_ind(ind)

    def _add_item(self, key: KEY, val: VAL) -> None:
        for ind in self._iterate_buckets(key):
            if self._try_set(ind, key, val):
                break

    def __setitem__(self, key: KEY, val: VAL) -> None:
        if self._is_full():
            self._size_up()
        self._add_item(key, val)

    def __delitem__(self, key: KEY) -> None:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                raise KeyError(key)
            if item is _deleted:
                continue
            if item.key == key:
                self._buckets[ind] = _deleted
                self._len -= 1
                break
        if self._is_sparse():
            self._size_down()

    def __getitem__(self, key: KEY) -> VAL:
        for ind in self._iterate_buckets(key):
            item = self._buckets[ind]
            if item is None:
                break
            if item is _deleted:
                continue
            if item.key == key:
                return item.val
        raise KeyError(key)

    def __len__(self) -> int:
        return self._len

    def __iter__(self) -> Iterator[KEY]:
        yield from (item.key for item in self._buckets if item)

    def __repr__(self) -> str:
        val_string = ' ,'.join(
            f"{item.key}: {item.val}" for item in self._buckets if item)
        return f"HashMap({val_string})"
| 122 | 0 |
def harmonic_series(n_term) -> list:
    """Return the first ``n_term`` terms of the harmonic series as strings.

    ``n_term`` may be an int or a numeric string; an empty string yields [].
    Fixes: the function was named ``A`` although the ``__main__`` guard calls
    ``harmonic_series``, and the body mixed the parameter with an undefined
    ``n_term``.
    """
    if n_term == "":
        return []
    series: list = []
    for temp in range(int(n_term)):
        # First term is "1", subsequent terms are "1/k".
        series.append(f"1/{temp + 1}" if series else "1")
    return series
if __name__ == "__main__":
    # Fixes: the prompt's result was bound to an obfuscated name while the
    # call below reads ``nth_term``.
    nth_term = input('''Enter the last number (nth term) of the Harmonic Series''')
    print('''Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n''')
    print(harmonic_series(nth_term))
| 71 |
def price_plus_tax(price: float, tax_rate: float) -> float:
    """Return ``price`` with ``tax_rate`` (e.g. 0.25 for 25%) applied.

    Fixes: the obfuscated definition declared two parameters both named
    ``__magic_name__`` (a SyntaxError) while the body and the ``__main__``
    guard use ``price``/``tax_rate`` and ``price_plus_tax``.
    """
    return price * (1 + tax_rate)
if __name__ == "__main__":
    # Demo: the f-string '=' specifier prints the expression text alongside its value.
    print(f'''{price_plus_tax(1_00, 0.2_5) = }''')
    print(f'''{price_plus_tax(1_2_5.5_0, 0.0_5) = }''')
'''simple docstring'''
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main() -> None:
    """Parse TensorFlow benchmark arguments from the CLI and run the benchmark.

    Deprecated ``--no_*`` flags are translated into an actionable error
    message; genuinely unknown flags are reported as-is.

    Fixes: the obfuscated version passed the function itself to
    ``HfArgumentParser`` and to ``TensorFlowBenchmark(args=...)``, parsed the
    arguments *before* the try/except (so the except branch was unreachable),
    and was named ``__lowercase`` although the ``__main__`` guard calls
    ``main()``.
    """
    parser = HfArgumentParser(TensorFlowBenchmarkArguments)
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e).split(" ")[:-1])
        full_error_msg = ""
        # NOTE(review): eval on argv-derived text mirrors the upstream script but is fragile.
        depreciated_args = eval(str(e).split(" ")[-1])
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:])
            else:
                wrong_args.append(arg)
        if len(wrong_args) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args)
        raise ValueError(full_error_msg)
    benchmark = TensorFlowBenchmark(args=benchmark_args)
    benchmark.run()
if __name__ == "__main__":
    # Entry point: parse CLI benchmark arguments and run the TF benchmark.
    main()
| 371 |
'''simple docstring'''
# First notebook cell injected by the doc-notebook converter.
# Fixes: all three constants were bound to the same obfuscated name
# (each assignment shadowed the previous), and the list literal referenced
# ``INSTALL_CONTENT`` which no longer existed.
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 174 | 0 |
def solution(n: int = 600851475143) -> int:
    """Return the largest prime factor of ``n`` (Project Euler problem 3).

    Raises:
        TypeError: if ``n`` cannot be cast to int.
        ValueError: if ``n`` is not positive.

    Fixes: the parameter/locals had been obfuscated inconsistently (the body
    tested an undefined ``n`` and tracked the factor in a name it never read),
    and the ``__main__`` guard calls ``solution()``.
    """
    try:
        n = int(n)
    except (TypeError, ValueError):
        raise TypeError("Parameter n must be int or castable to int.")
    if n <= 0:
        raise ValueError("Parameter n must be greater than or equal to one.")
    max_number = 1
    i = 2
    # Trial division: strip every factor i; whatever remains above 1 is prime.
    while i * i <= n:
        while n % i == 0:
            max_number = i
            n //= i
        i += 1
    if n > 1:
        max_number = n
    return int(max_number)
if __name__ == "__main__":
    # Largest prime factor of the default Project Euler #3 input.
    print(f"{solution() = }")
| 11 |
'''simple docstring'''
from __future__ import annotations
from typing import Any
class ContainsLoopError(Exception):
    """Raised while iterating a linked list that contains a cycle.

    Fixes: the class was named ``snake_case`` with an undefined base
    ``__lowerCamelCase``; the node class below raises and catches
    ``ContainsLoopError``, grounding both the name and the ``Exception`` base.
    """
class Node:
    """Singly linked list node used to demonstrate loop detection.

    Fixes: the class was named ``snake_case`` although the demo below builds
    ``Node(...)`` instances, and the ``has_loop`` property had an unusable name
    while being accessed as ``root_node.has_loop``.
    """

    def __init__(self, data: Any) -> None:
        self.data = data
        self.next_node: Node | None = None

    def __iter__(self):
        """Yield each node's data; raise ContainsLoopError on a cycle."""
        node = self
        visited = set()  # ids of nodes already seen; O(1) membership vs. the original list scan
        while node:
            if id(node) in visited:
                raise ContainsLoopError
            visited.add(id(node))
            yield node.data
            node = node.next_node

    @property
    def has_loop(self) -> bool:
        """True if following ``next_node`` pointers revisits a node."""
        try:
            list(self)
            return False
        except ContainsLoopError:
            return True
if __name__ == "__main__":
    # Demo: fixes the obfuscated version where every target was the same name
    # ``a__`` while ``root_node`` was read, so no list was ever linked.
    root_node = Node(1)
    root_node.next_node = Node(2)
    root_node.next_node.next_node = Node(3)
    root_node.next_node.next_node.next_node = Node(4)
    print(root_node.has_loop)  # False
    root_node.next_node.next_node.next_node = root_node.next_node  # create a cycle
    print(root_node.has_loop)  # True

    root_node = Node(5)
    root_node.next_node = Node(6)
    root_node.next_node.next_node = Node(5)
    root_node.next_node.next_node.next_node = Node(6)
    print(root_node.has_loop)  # False

    root_node = Node(1)
    print(root_node.has_loop)  # False
| 53 | 0 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

# SentencePiece word-boundary marker.
SPIECE_UNDERLINE = "▁"

# Fixes: every constant below was bound to the same obfuscated name
# (``snake_case_``), while the tokenizer class references VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, MAX_MODEL_INPUT_SIZES and LANGUAGES.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "spm_file": "sentencepiece.bpe.model",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/vocab.json"
        ),
    },
    "spm_file": {
        "facebook/s2t-small-librispeech-asr": (
            "https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/sentencepiece.bpe.model"
        )
    },
}

MAX_MODEL_INPUT_SIZES = {
    "facebook/s2t-small-librispeech-asr": 1024,
}

MUSTC_LANGS = ["pt", "fr", "ru", "nl", "ro", "it", "es", "de"]

LANGUAGES = {"mustc": MUSTC_LANGS}
class Speech2TextTokenizer(PreTrainedTokenizer):
    """Speech2Text tokenizer backed by a JSON vocabulary and a SentencePiece model.

    Fixes: the base class ``lowercase`` was undefined (``PreTrainedTokenizer``
    is the imported, grounded base), nearly all methods shared one obfuscated
    name (so later definitions shadowed earlier ones while the bodies call
    ``self.set_tgt_lang_special_tokens`` etc.), and every ``self.x = x``
    assignment had been collapsed to a throwaway local.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = MAX_MODEL_INPUT_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    # Language-code prefix prepended to every encoded sequence (set per tgt_lang).
    prefix_tokens: List[int] = []

    def __init__(
        self,
        vocab_file,
        spm_file,
        bos_token="<s>",
        eos_token="</s>",
        pad_token="<pad>",
        unk_token="<unk>",
        do_upper_case=False,
        do_lower_case=False,
        tgt_lang=None,
        lang_codes=None,
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            pad_token=pad_token,
            do_upper_case=do_upper_case,
            do_lower_case=do_lower_case,
            tgt_lang=tgt_lang,
            lang_codes=lang_codes,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.do_upper_case = do_upper_case
        self.do_lower_case = do_lower_case

        self.encoder = load_json(vocab_file)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.spm_file = spm_file
        self.sp_model = load_spm(spm_file, self.sp_model_kwargs)

        if lang_codes is not None:
            self.lang_codes = lang_codes
            self.langs = LANGUAGES[lang_codes]
            self.lang_tokens = [f'<lang:{lang}>' for lang in self.langs]
            self.lang_code_to_id = {lang: self.sp_model.PieceToId(f'<lang:{lang}>') for lang in self.langs}
            self.additional_special_tokens = self.lang_tokens
            self._tgt_lang = tgt_lang if tgt_lang is not None else self.langs[0]
            self.set_tgt_lang_special_tokens(self._tgt_lang)
        else:
            self.lang_code_to_id = {}

    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    @property
    def tgt_lang(self) -> str:
        return self._tgt_lang

    @tgt_lang.setter
    def tgt_lang(self, new_tgt_lang) -> None:
        self._tgt_lang = new_tgt_lang
        self.set_tgt_lang_special_tokens(new_tgt_lang)

    def set_tgt_lang_special_tokens(self, tgt_lang: str) -> None:
        """Prefix every sequence with the language-code token of ``tgt_lang``."""
        lang_code_id = self.lang_code_to_id[tgt_lang]
        self.prefix_tokens = [lang_code_id]

    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder[self.unk_token])

    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens) -> str:
        """Decode sub-token runs with SentencePiece, passing special tokens through."""
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                decoded = self.sp_model.decode(current_sub_tokens)
                out_string += (decoded.upper() if self.do_upper_case else decoded) + token + " "
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        decoded = self.sp_model.decode(current_sub_tokens)
        out_string += decoded.upper() if self.do_upper_case else decoded
        return out_string.strip()

    def build_inputs_with_special_tokens(self, token_ids_a, token_ids_a_a=None):
        if token_ids_a_a is None:
            return self.prefix_tokens + token_ids_a + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return self.prefix_tokens + token_ids_a + token_ids_a_a + [self.eos_token_id]

    def get_special_tokens_mask(
        self, token_ids_a: List[int], token_ids_a_a: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_a, token_ids_1=token_ids_a_a, already_has_special_tokens=True)
        prefix_ones = [1] * len(self.prefix_tokens)
        suffix_ones = [1]
        if token_ids_a_a is None:
            return prefix_ones + ([0] * len(token_ids_a)) + suffix_ones
        return prefix_ones + ([0] * len(token_ids_a)) + ([0] * len(token_ids_a_a)) + suffix_ones

    def get_vocab(self) -> Dict:
        vocab = self.encoder.copy()
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self) -> Dict:
        state = self.__dict__.copy()
        # SentencePieceProcessor is not picklable; rebuilt in __setstate__.
        state["sp_model"] = None
        return state

    def __setstate__(self, d: Dict) -> None:
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = load_spm(self.spm_file, self.sp_model_kwargs)

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Write the JSON vocab and SentencePiece model into ``save_directory``."""
        save_dir = Path(save_directory)
        assert save_dir.is_dir(), f'{save_directory} should be a directory'
        vocab_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['vocab_file']
        )
        spm_save_path = save_dir / (
            (filename_prefix + '-' if filename_prefix else '') + self.vocab_files_names['spm_file']
        )
        save_json(self.encoder, vocab_save_path)
        if os.path.abspath(self.spm_file) != os.path.abspath(spm_save_path) and os.path.isfile(self.spm_file):
            copyfile(self.spm_file, spm_save_path)
        elif not os.path.isfile(self.spm_file):
            with open(spm_save_path, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (str(vocab_save_path), str(spm_save_path))
def load_spm(path: str, sp_model_kwargs: Dict[str, Any]) -> sentencepiece.SentencePieceProcessor:
    """Load a SentencePiece model from ``path``.

    Fixes: the obfuscated definition declared two parameters with the same
    name (a SyntaxError), was named ``A__`` although the tokenizer calls
    ``load_spm``, and called ``.Load`` on an undefined name.
    """
    spm = sentencepiece.SentencePieceProcessor(**sp_model_kwargs)
    spm.Load(str(path))
    return spm
def A__ ( UpperCAmelCase_ ):
    """Read the JSON file at the given path and return the parsed object.

    BUG FIX: `json.load` was handed the path string instead of the open
    file handle, which raises AttributeError.
    """
    with open(UpperCAmelCase_, 'r') as f:
        return json.load(f)
def A__ ( data , path ):
    """Serialize `data` as pretty-printed JSON to the file at `path`.

    BUG FIX: the original signature repeated one parameter name (a
    SyntaxError) and `json.dump` received the same object twice instead of
    (data, file_handle).
    """
    with open(path, 'w') as f:
        json.dump(data, f, indent=2)
| 236 |
'''simple docstring'''
from torch import nn
def A__ ( UpperCAmelCase_ ):
    """Map an activation-function name to the matching `torch.nn` module.

    Accepts "swish"/"silu", "mish" and "gelu"; raises ValueError otherwise.

    BUG FIX: the body read `act_fn` while the parameter was named
    `UpperCAmelCase_`, raising NameError on every call.
    """
    act_fn = UpperCAmelCase_
    if act_fn in ["swish", "silu"]:
        return nn.SiLU()
    elif act_fn == "mish":
        return nn.Mish()
    elif act_fn == "gelu":
        return nn.GELU()
    else:
        raise ValueError(f'Unsupported activation function: {act_fn}')
| 236 | 1 |
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_sentencepiece_available,
    is_tokenizers_available,
    is_torch_available,
)

# Lazy-import structure for the llama sub-package.
#
# BUG FIX: every assignment in the mangled source targeted `_snake_case`,
# so each optional-dependency branch clobbered the previous one, the final
# `_LazyModule(...)` referenced an undefined `_import_structure`, and the
# lazy module was never installed into `sys.modules`. Restored to the
# standard transformers lazy-init pattern.
_import_structure = {
    '''configuration_llama''': ['''LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LlamaConfig'''],
}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_llama'''] = ['''LlamaTokenizer''']

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''tokenization_llama_fast'''] = ['''LlamaTokenizerFast''']

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure['''modeling_llama'''] = [
        '''LlamaForCausalLM''',
        '''LlamaModel''',
        '''LlamaPreTrainedModel''',
        '''LlamaForSequenceClassification''',
    ]

if TYPE_CHECKING:
    # Static type checkers see the real symbols.
    from .configuration_llama import LLAMA_PRETRAINED_CONFIG_ARCHIVE_MAP, LlamaConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama import LlamaTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_llama_fast import LlamaTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_llama import LlamaForCausalLM, LlamaForSequenceClassification, LlamaModel, LlamaPreTrainedModel
else:
    # At runtime, replace this module with a lazy proxy.
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 157 | import random
import timeit
from functools import wraps
from typing import Callable, Optional
from ..configuration_utils import PretrainedConfig
from ..models.auto.modeling_tf_auto import TF_MODEL_MAPPING, TF_MODEL_WITH_LM_HEAD_MAPPING
from ..utils import is_pyanvml_available, is_tf_available, logging
from .benchmark_utils import (
Benchmark,
Memory,
MemorySummary,
measure_peak_memory_cpu,
start_memory_tracing,
stop_memory_tracing,
)
if is_tf_available():
import tensorflow as tf
from tensorflow.python.framework.errors_impl import ResourceExhaustedError
from .benchmark_args_tf import TensorFlowBenchmarkArguments
if is_pyanvml_available():
import pyanvml.pyanvml as nvml
# Module-level logger.
# BUG FIX: it was bound to `_snake_case`, but the benchmark class below
# calls `logger.info(...)` / `logger.warning(...)`, which would NameError.
logger = logging.get_logger(__name__)
def _UpperCamelCase ( do_eager_mode , use_xla ) -> List[str]:
    """Decorator factory: wrap a TF callable so it runs eagerly or as a
    compiled `tf.function`, depending on the benchmark arguments.

    Raises ValueError when eager mode and XLA are requested together.

    BUG FIX: the original signature repeated `snake_case__` (a SyntaxError)
    and the inner closures repeated their *args/**kwargs names while the
    body read `do_eager_mode`, `use_xla` and `func`.
    """
    def run_func(func):
        @wraps(func)
        def run_in_eager_mode(*args, **kwargs):
            return func(*args, **kwargs)

        @wraps(func)
        @tf.function(experimental_compile=use_xla)
        def run_in_graph_mode(*args, **kwargs):
            return func(*args, **kwargs)

        if do_eager_mode is True:
            if use_xla is not False:
                raise ValueError(
                    "Cannot run model in XLA, if `args.eager_mode` is set to `True`. Please set `args.eager_mode=False`." )
            return run_in_eager_mode
        else:
            return run_in_graph_mode

    return run_func
def _UpperCamelCase ( batch_size , sequence_length , vocab_size ) -> ["tf.Tensor"]:
    """Build a (batch_size, sequence_length) tensor of random token ids in
    [0, vocab_size).

    BUG FIX: the original signature repeated one parameter name (a
    SyntaxError), the RNG was bound to a throwaway name while the
    comprehension read `rng`, and the dtype `tf.intaa` is the digit-mangled
    form of `tf.int32`.
    """
    rng = random.Random()
    values = [rng.randint(0, vocab_size - 1) for i in range(batch_size * sequence_length)]
    return tf.constant(values, shape=(batch_size, sequence_length), dtype=tf.int32)
class _snake_case ( _lowercase ):
    """TensorFlow benchmark backend: measures inference/training speed and
    memory for the models named in ``self.config_dict``.

    NOTE(review): this class is transcribed as-is from a mangled source.
    Local results are assigned to throwaway ``__UpperCAmelCase`` names while
    later statements read the intended names (``strategy``, ``_inference``,
    ``_train``, ``config``, ``model``, ``vocab_size``, ``input_ids``,
    ``memory``, ``summary`` ...), parameter lists repeat ``__lowerCamelCase``
    (a SyntaxError), and the decorators reference ``run_with_tf_optimizations``
    / ``random_input_ids`` which are defined above under mangled names.
    As written the class cannot run; the comments below record apparent
    intent only — confirm against upstream before relying on them.
    """

    # Expected attributes per the annotations: benchmark args, model config,
    # and the framework tag used in reports.
    lowerCamelCase__: TensorFlowBenchmarkArguments
    lowerCamelCase__: PretrainedConfig
    lowerCamelCase__: str = "TensorFlow"

    @property
    def _lowerCamelCase ( self: int ) -> Any:
        # Version string of the installed TensorFlow package.
        return tf.__version__

    def _lowerCamelCase ( self: Dict , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> float:
        # Inference-speed entry point: (model_name, batch_size, sequence_length).
        # initialize GPU on separate process
        __UpperCAmelCase : List[Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        __UpperCAmelCase : int = self._prepare_inference_func(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return self._measure_speed(_inference )

    def _lowerCamelCase ( self: Tuple , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> float:
        # Training-speed entry point.
        __UpperCAmelCase : Union[str, Any] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        __UpperCAmelCase : Dict = self._prepare_train_func(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return self._measure_speed(_train )

    def _lowerCamelCase ( self: Union[str, Any] , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> [Memory, Optional[MemorySummary]]:
        # Inference-memory entry point; grows GPU memory on demand first.
        # initialize GPU on separate process
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCamelCase )
        __UpperCAmelCase : List[str] = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        __UpperCAmelCase : int = self._prepare_inference_func(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return self._measure_memory(_inference )

    def _lowerCamelCase ( self: str , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> [Memory, Optional[MemorySummary]]:
        # Training-memory entry point.
        if self.args.is_gpu:
            tf.config.experimental.set_memory_growth(self.args.gpu_list[self.args.device_idx] , __lowerCamelCase )
        __UpperCAmelCase : int = self.args.strategy
        if strategy is None:
            raise ValueError("A device strategy has to be initialized before using TensorFlow." )
        __UpperCAmelCase : int = self._prepare_train_func(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )
        return self._measure_memory(_train )

    def _lowerCamelCase ( self: int , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> Callable[[], None]:
        # Build a zero-arg closure that runs one forward pass of the model.
        __UpperCAmelCase : Union[str, Any] = self.config_dict[model_name]

        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )

        # Prefer the concrete architecture class from the config when allowed.
        __UpperCAmelCase : int = (
            hasattr(__lowerCamelCase , "architectures" )
            and isinstance(config.architectures , __lowerCamelCase )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                __UpperCAmelCase : int = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                __UpperCAmelCase : Dict = __import__("transformers" , fromlist=[model_class] )
                __UpperCAmelCase : str = getattr(__lowerCamelCase , __lowerCamelCase )
                __UpperCAmelCase : Optional[Any] = model_cls(__lowerCamelCase )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            # Fall back to the generic mapping for this config class.
            __UpperCAmelCase : int = TF_MODEL_MAPPING[config.__class__](__lowerCamelCase )

        # encoder-decoder has vocab size saved differently
        __UpperCAmelCase : List[str] = config.vocab_size if hasattr(__lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
        __UpperCAmelCase : Dict = random_input_ids(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_forward():
            return model(__lowerCamelCase , decoder_input_ids=__lowerCamelCase , training=__lowerCamelCase )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_forward():
            return model(__lowerCamelCase , training=__lowerCamelCase )

        __UpperCAmelCase : int = encoder_decoder_forward if config.is_encoder_decoder else encoder_forward
        return _inference

    def _lowerCamelCase ( self: List[str] , __lowerCamelCase: str , __lowerCamelCase: int , __lowerCamelCase: int ) -> Callable[[], None]:
        # Build a zero-arg closure that runs one forward+backward pass.
        __UpperCAmelCase : Any = self.config_dict[model_name]

        if self.args.eager_mode is not False:
            raise ValueError("Training cannot be done in eager mode. Please make sure that `args.eager_mode = False`." )

        if self.args.fpaa:
            raise NotImplementedError("Mixed precision is currently not supported." )

        __UpperCAmelCase : Tuple = (
            hasattr(__lowerCamelCase , "architectures" )
            and isinstance(config.architectures , __lowerCamelCase )
            and len(config.architectures ) > 0
        )
        if not self.args.only_pretrain_model and has_model_class_in_config:
            try:
                __UpperCAmelCase : Dict = "TF" + config.architectures[0]  # prepend 'TF' for tensorflow model
                __UpperCAmelCase : Optional[Any] = __import__("transformers" , fromlist=[model_class] )
                __UpperCAmelCase : int = getattr(__lowerCamelCase , __lowerCamelCase )
                __UpperCAmelCase : Any = model_cls(__lowerCamelCase )
            except ImportError:
                raise ImportError(
                    f'''{model_class} does not exist. If you just want to test the pretrained model, you might want to'''
                    " set `--only_pretrain_model` or `args.only_pretrain_model=True`." )
        else:
            # Training variant uses the LM-head mapping.
            __UpperCAmelCase : Union[str, Any] = TF_MODEL_WITH_LM_HEAD_MAPPING[config.__class__](__lowerCamelCase )

        # encoder-decoder has vocab size saved differently
        __UpperCAmelCase : List[Any] = config.vocab_size if hasattr(__lowerCamelCase , "vocab_size" ) else config.encoder.vocab_size
        __UpperCAmelCase : Dict = random_input_ids(__lowerCamelCase , __lowerCamelCase , __lowerCamelCase )

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_decoder_train():
            # Loss is the first model output; gradients w.r.t. all trainables.
            __UpperCAmelCase : List[Any] = model(__lowerCamelCase , decoder_input_ids=__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )[0]
            __UpperCAmelCase : Optional[Any] = tf.gradients(__lowerCamelCase , model.trainable_variables )
            return gradients

        @run_with_tf_optimizations(self.args.eager_mode , self.args.use_xla )
        def encoder_train():
            __UpperCAmelCase : Optional[Any] = model(__lowerCamelCase , labels=__lowerCamelCase , training=__lowerCamelCase )[0]
            __UpperCAmelCase : List[Any] = tf.gradients(__lowerCamelCase , model.trainable_variables )
            return gradients

        __UpperCAmelCase : Optional[int] = encoder_decoder_train if config.is_encoder_decoder else encoder_train
        return _train

    def _lowerCamelCase ( self: List[Any] , __lowerCamelCase: Any ) -> float:
        # Time the closure with timeit; returns the best average per run.
        with self.args.strategy.scope():
            try:
                if self.args.is_tpu or self.args.use_xla:
                    # run additional 10 times to stabilize compilation for tpu
                    logger.info("Do inference on TPU. Running model 5 times to stabilize compilation" )
                    timeit.repeat(__lowerCamelCase , repeat=1 , number=5 )

                # as written in https://docs.python.org/2/library/timeit.html#timeit.Timer.repeat, min should be taken rather than the average
                __UpperCAmelCase : List[str] = timeit.repeat(
                    __lowerCamelCase , repeat=self.args.repeat , number=10 , )

                return min(__lowerCamelCase ) / 10.0
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )

    def _lowerCamelCase ( self: Optional[int] , __lowerCamelCase: Callable[[], None] ) -> [Memory, MemorySummary]:
        # Measure peak memory of the closure: nvml on GPU, CPU peak otherwise,
        # optionally with line-by-line tracing.
        logger.info(
            "Note that TensorFlow allocates more memory than "
            "it might need to speed up computation. "
            "The memory reported here corresponds to the memory "
            "reported by `nvidia-smi`, which can vary depending "
            "on total available memory on the GPU that is used." )
        with self.args.strategy.scope():
            try:
                if self.args.trace_memory_line_by_line:
                    if not self.args.eager_mode:
                        raise ValueError(
                            "`args.eager_mode` is set to `False`. Make sure to run model in eager mode to measure memory"
                            " consumption line by line." )
                    __UpperCAmelCase : Union[str, Any] = start_memory_tracing("transformers" )

                if self.args.is_tpu:
                    # tpu
                    raise NotImplementedError(
                        "Memory Benchmarking is currently not implemented for TPU. Please disable memory benchmarking"
                        " with `args.memory=False`" )
                elif self.args.is_gpu:
                    # gpu
                    if not is_pyanvml_available():
                        logger.warning(
                            "py3nvml not installed, we won't log GPU memory usage. "
                            "Install py3nvml (pip install py3nvml) to log information about GPU." )
                        __UpperCAmelCase : Union[str, Any] = "N/A"
                    else:
                        logger.info(
                            "Measuring total GPU usage on GPU device. Make sure to not have additional processes"
                            " running on the same GPU." )
                        # init nvml
                        nvml.nvmlInit()
                        func()
                        __UpperCAmelCase : str = nvml.nvmlDeviceGetHandleByIndex(self.args.device_idx )
                        __UpperCAmelCase : List[Any] = nvml.nvmlDeviceGetMemoryInfo(__lowerCamelCase )
                        __UpperCAmelCase : List[Any] = meminfo.used
                        __UpperCAmelCase : List[Any] = Memory(__lowerCamelCase )
                        # shutdown nvml
                        nvml.nvmlShutdown()
                else:
                    # cpu
                    if self.args.trace_memory_line_by_line:
                        logger.info(
                            "When enabling line by line tracing, the max peak memory for CPU is inaccurate in"
                            " TensorFlow." )
                        __UpperCAmelCase : Tuple = None
                    else:
                        __UpperCAmelCase : str = measure_peak_memory_cpu(__lowerCamelCase )
                        __UpperCAmelCase : Optional[int] = Memory(__lowerCamelCase ) if isinstance(__lowerCamelCase , __lowerCamelCase ) else memory_bytes

                if self.args.trace_memory_line_by_line:
                    __UpperCAmelCase : str = stop_memory_tracing(__lowerCamelCase )
                    if memory is None:
                        # fall back to the traced total when no direct measure
                        __UpperCAmelCase : Tuple = summary.total
                else:
                    __UpperCAmelCase : Union[str, Any] = None

                return memory, summary
            except ResourceExhaustedError as e:
                self.print_fn(f'''Doesn\'t fit on GPU. {e}''' )
                return "N/A", None
| 157 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# BUG FIX: every constant below was assigned to the same name
# `UpperCAmelCase`, each overwriting the last, while the tokenizer class
# references `VOCAB_FILES_NAMES`, `PRETRAINED_VOCAB_FILES_MAP` and
# `PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES`. Restored to the intended names.
logger = logging.get_logger(__name__)

# Marker SentencePiece uses for a word boundary.
SPIECE_UNDERLINE = """▁"""

VOCAB_FILES_NAMES = {"""vocab_file""": """spiece.model"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """google/reformer-crime-and-punishment""": (
            """https://huggingface.co/google/reformer-crime-and-punishment/resolve/main/spiece.model"""
        )
    }
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """google/reformer-crime-and-punishment""": 524_288,
}
class UpperCAmelCase_ ( PreTrainedTokenizer ):
    """SentencePiece tokenizer (Reformer-style).

    BUG FIX notes: the mangled source inherited from an undefined
    `_lowercase` (restored to `PreTrainedTokenizer`, imported above), gave
    every method the same name `_UpperCamelCase` (so later defs shadowed
    earlier ones and the base-class hook names never existed), repeated the
    `__init__` parameter name (a SyntaxError), and bound every local/attr
    to throwaway names. Restored to the hook names `PreTrainedTokenizer`
    dispatches to; `get_vocab` itself reads `self.convert_ids_to_tokens`
    and `self.vocab_size`, grounding those names.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['''input_ids''', '''attention_mask''']

    def __init__(
        self,
        vocab_file,
        eos_token="</s>",
        unk_token="<unk>",
        additional_special_tokens=[],  # kept mutable default for interface compatibility; it is only passed through
        sp_model_kwargs: Optional[Dict[str, Any]] = None,
        **kwargs,
    ) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            eos_token=eos_token,
            unk_token=unk_token,
            additional_special_tokens=additional_special_tokens,
            sp_model_kwargs=self.sp_model_kwargs,
            **kwargs,
        )
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)

    @property
    def vocab_size(self) -> int:
        # Size of the underlying SentencePiece vocabulary.
        return self.sp_model.get_piece_size()

    def get_vocab(self) -> Dict[str, int]:
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab

    def __getstate__(self):
        # Drop the unpicklable SentencePiece processor; rebuilt in __setstate__.
        state = self.__dict__.copy()
        state["sp_model"] = None
        return state

    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility with pickles predating `sp_model_kwargs`
        if not hasattr(self, '''sp_model_kwargs'''):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)

    def _tokenize(self, text: str) -> List[str]:
        # `out_type=str` makes SentencePiece return piece strings, not ids.
        return self.sp_model.encode(text, out_type=str)

    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)

    def _convert_id_to_token(self, index):
        # Added tokens are resolved by the base class before this is called,
        # so `index` is expected to be inside the SentencePiece range.
        if index < self.sp_model.get_piece_size():
            token = self.sp_model.IdToPiece(index)
        return token

    def convert_tokens_to_string(self, tokens) -> str:
        current_sub_tokens = []
        out_string = ''''''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(F'''Vocabulary path ({save_directory}) should be a directory''')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
        # Copy the source model file when it lives elsewhere; otherwise dump
        # the serialized model held in memory.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, '''wb''') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
| 54 | """simple docstring"""
import importlib.metadata
import operator
import re
import sys
from typing import Optional
from packaging import version
# Map pip-style comparison operators to their functional equivalents.
# BUG FIX: the dict was bound to `UpperCAmelCase`, but the version-check
# helpers below look it up as `ops`.
ops = {
    """<""": operator.lt,
    """<=""": operator.le,
    """==""": operator.eq,
    """!=""": operator.ne,
    """>=""": operator.ge,
    """>""": operator.gt,
}
def lowercase ( op , got_ver , want_ver , requirement , pkg , hint ) -> Optional[Any]:
    """Compare an installed version against a wanted version with the pip
    operator `op`; raise ImportError when the constraint is not met.

    BUG FIX: the original signature repeated `a__` six times (a SyntaxError)
    while the body read the six names restored here, in the call order used
    elsewhere in this module: (op, got_ver, want_ver, requirement, pkg, hint).
    """
    if got_ver is None or want_ver is None:
        raise ValueError(
            F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual. Consider'''
            F''' reinstalling {pkg}.''' )
    if not ops[op](version.parse(got_ver) , version.parse(want_ver) ):
        raise ImportError(
            F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )
def lowercase ( requirement : str , hint : Optional[str] = None ) -> None:
    """Check that a pip-style `requirement` (e.g. "tokenizers==0.9.4",
    "numpy>=1.17,<2.0", "python>=3.8", or a bare package name) is satisfied
    by the current environment; raise otherwise.

    BUG FIX: the original signature repeated `a__` (a SyntaxError), every
    tuple unpacking was destroyed, `str(a__)` was used where `str(x)` was
    meant, and the comparisons called an undefined `_compare_versions`
    (defined here as a private nested helper so this block stands alone).
    """

    def _compare_versions(op, got_ver, want_ver):
        # One operator/version comparison; closes over requirement/pkg/hint.
        if got_ver is None or want_ver is None:
            raise ValueError(
                F'''Unable to compare versions for {requirement}: need={want_ver} found={got_ver}. This is unusual.'''
                F''' Consider reinstalling {pkg}.''' )
        if not ops[op](version.parse(got_ver), version.parse(want_ver)):
            raise ImportError(
                F'''{requirement} is required for a normal functioning of this module, but found {pkg}=={got_ver}.{hint}''' )

    hint = F'''\n{hint}''' if hint is not None else ''''''

    # non-versioned check (bare package name)
    if re.match(R'''^[\w_\-\d]+$''' , requirement ):
        pkg, op, want_ver = requirement, None, None
        wanted = {}
    else:
        match = re.findall(R'''^([^!=<>\s]+)([\s!=<>]{1,2}.+)''' , requirement )
        if not match:
            raise ValueError(
                '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23, but'''
                F''' got {requirement}''' )
        pkg, want_full = match[0]
        want_range = want_full.split(''',''' )  # there could be multiple requirements
        wanted = {}
        for w in want_range:
            match = re.findall(R'''^([\s!=<>]{1,2})(.+)''' , w )
            if not match:
                raise ValueError(
                    '''requirement needs to be in the pip package format, .e.g., package_a==1.23, or package_b>=1.23,'''
                    F''' but got {requirement}''' )
            op, want_ver = match[0]
            wanted[op] = want_ver
            if op not in ops:
                raise ValueError(F'''{requirement}: need one of {list(ops.keys() )}, but got {op}''' )

    # special case: compare against the running interpreter itself
    if pkg == "python":
        got_ver = '''.'''.join([str(x) for x in sys.version_info[:3]] )
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver)
        return

    # check if any version is installed
    try:
        got_ver = importlib.metadata.version(pkg)
    except importlib.metadata.PackageNotFoundError:
        raise importlib.metadata.PackageNotFoundError(
            F'''The \'{requirement}\' distribution was not found and is required by this application. {hint}''' )

    # check that the right version is installed if version number or a range was provided
    if want_ver is not None:
        for op, want_ver in wanted.items():
            _compare_versions(op, got_ver, want_ver)
def lowercase ( a__ : Tuple ) -> Any:
    """Check `a__` as a requirement, attaching a transformers install hint.

    BUG FIX: the hint string was built and then discarded, and the
    requirement was passed twice to the checker.
    NOTE(review): `require_version` is not defined under that name in this
    mangled file (the checker above is also named `lowercase`) — confirm
    the intended target before shipping.
    """
    hint = '''Try: pip install transformers -U or pip install -e \'.[dev]\' if you\'re working with git main'''
    return require_version(a__ , hint )
def UpperCAmelCase__ (UpperCamelCase_ ):
    """Return the two's-complement representation of a non-positive integer
    as a binary string prefixed with "0b".

    Raises ValueError for positive input.

    BUG FIX: the mangled source mixed the parameter name with an undefined
    `number` and bound both intermediate results to the same throwaway
    `snake_case` name while reading `binary_number_length` and
    `twos_complement_number`.
    """
    if UpperCamelCase_ > 0:
        raise ValueError('''input must be a negative integer''' )
    # Width of |n| in bits (bin(negative)[3:] skips the "-0b" prefix).
    binary_number_length = len(bin(UpperCamelCase_ )[3:] )
    twos_complement_number = bin(abs(UpperCamelCase_ ) - (1 << binary_number_length) )[3:]
    twos_complement_number = (
        (
            '''1'''
            + '''0''' * (binary_number_length - len(twos_complement_number ))
            + twos_complement_number
        )
        if UpperCamelCase_ < 0
        else '''0'''
    )
    return "0b" + twos_complement_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 127 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# BUG FIX: both module constants were assigned to `_SCREAMING_SNAKE_CASE`,
# so the archive map overwrote (discarded) the logger. Restored distinct,
# conventional names.
logger = logging.get_logger(__name__)

MOBILENET_V2_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/mobilenet_v2_1.4_224": "https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json",
    "google/mobilenet_v2_1.0_224": "https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json",
    "google/mobilenet_v2_0.75_160": "https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json",
    "google/mobilenet_v2_0.35_96": "https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A__ ( PretrainedConfig ):
    """Configuration class for MobileNetV2 models.

    BUG FIX: the base class was the undefined `snake_case__` (restored to
    `PretrainedConfig`, imported above), the duplicate `__snake_case`
    parameter names were a SyntaxError (restored from the attribute reads
    in the body), and every attribute was bound to a throwaway local
    `snake_case` instead of `self`.
    NOTE(review): the class attribute was mangled to `__magic_name__`; the
    `PretrainedConfig` convention is `model_type` — restored, but confirm
    no caller relied on the mangled name.
    """

    model_type = 'mobilenet_v2'

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.02,
        layer_norm_eps=0.001,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if depth_multiplier <= 0:
            raise ValueError('''depth_multiplier must be greater than zero.''')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A__ ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    BUG FIX: the base class was the undefined `snake_case__` (restored to
    `OnnxConfig`, imported above) and all three properties shared the
    mangled name `a_`, so only the last definition survived. Restored to
    the `OnnxConfig` property names the exporter dispatches to.
    NOTE(review): the class attribute was mangled to `__magic_name__`; the
    `OnnxConfig` convention is `torch_onnx_minimum_version` — restored,
    confirm against the exporter in use.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # Single dynamic axis: the batch dimension of pixel_values.
        return OrderedDict([('''pixel_values''', {0: '''batch'''})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([('''logits''', {0: '''batch'''})])
        else:
            return OrderedDict([('''last_hidden_state''', {0: '''batch'''}), ('''pooler_output''', {0: '''batch'''})])

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating exported outputs.
        return 1E-4
| 127 | 1 |
'''simple docstring'''
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class a__ ( TestCase ):
    """Lints every dataset script under ./datasets for two rules:
    `open(...)` must pass an explicit encoding, and no bare `print(...)`.

    BUG FIX: the class inherited from its own (not-yet-bound) name — a
    NameError at class creation — restored to `TestCase` (imported above).
    All four methods shared the mangled name `__SCREAMING_SNAKE_CASE`
    (shadowing each other); the bodies themselves call
    `self._no_encoding_on_file_open` / `self._no_print_statements`, which
    grounds the restored helper names. Locals were bound to throwaway names.
    """

    def _no_encoding_on_file_open(self, filepath):
        """Return a match object when `filepath` opens a file without an
        explicit encoding/binary mode, else None."""
        with open(filepath, encoding='''utf-8''') as input_file:
            regexp = re.compile(r'''(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)''')
            input_text = input_file.read()
            match = regexp.search(input_text)
        return match

    def _no_print_statements(self, filepath):
        """Return the first real `print(` call in `filepath` (ignoring ones
        inside comments, strings and docstrings), else None."""
        with open(filepath, encoding='''utf-8''') as input_file:
            regexp = re.compile(r'''#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()''', re.DOTALL)
            input_text = input_file.read()
            # use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
            matches = regexp.finditer(input_text)
            matches = [match for match in matches if match is not None and match.group(1) is not None]
        return matches[0] if matches else None

    def test_no_encoding_on_file_open(self):
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_encoding_on_file_open(str(dataset)):
                raise AssertionError(F"""open(...) must use utf-8 encoding in {dataset}""")

    def test_no_print_statements(self):
        dataset_paths = Path('''./datasets''')
        dataset_files = list(dataset_paths.absolute().glob('''**/*.py'''))
        for dataset in dataset_files:
            if self._no_print_statements(str(dataset)):
                raise AssertionError(F"""print statement found in {dataset}. Use datasets.logger/logging instead.""")
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
# Module-level logger.
# NOTE(review): bound to the mangled name `__UpperCAmelCase` and never
# referenced in this chunk; the conventional name would be `logger` —
# confirm against the rest of the file before renaming.
__UpperCAmelCase = logging.get_logger(__name__)
def _snake_case ( A , A , A ) -> Optional[Any]:
return [
int(1000 * (box[0] / width) ),
int(1000 * (box[1] / height) ),
int(1000 * (box[2] / width) ),
int(1000 * (box[3] / height) ),
]
def _snake_case ( image , lang , tesseract_config ) -> Union[str, Any]:
    """Run Tesseract OCR on `image` and return (words, normalized_boxes),
    with boxes in the 0-1000 coordinate system.

    BUG FIX: the original signature repeated the parameter name `A` (a
    SyntaxError) and every local was bound to a throwaway name while the
    statements read `pil_image`, `data`, `words`, coordinate lists, etc.
    The per-box normalization is inlined because the helper it called is
    defined under a mangled name in this file.
    """
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type='''dict''', config=tesseract_config)
    words, left, top, width, height = data['''text'''], data['''left'''], data['''top'''], data['''width'''], data['''height''']

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes into the 0-1000 space
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append([
            int(1000 * (box[0] / image_width)),
            int(1000 * (box[1] / image_height)),
            int(1000 * (box[2] / image_width)),
            int(1000 * (box[3] / image_height)),
        ])

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class a__ ( BaseImageProcessor ):
    """Image processor for LayoutLM-style models: optionally runs Tesseract
    OCR, then resizes, rescales and normalizes images.

    BUG FIX: the class inherited from its own (not-yet-bound) name — a
    NameError at class creation — restored to `BaseImageProcessor`
    (imported above). The duplicated `lowerCamelCase_` parameter names in
    `__init__`/`preprocess` were SyntaxErrors (restored from the attribute
    reads in the bodies), and every attribute/local was bound to a
    throwaway `lowerCAmelCase__`.
    """

    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: "PILImageResampling" = PILImageResampling.BILINEAR,
        do_rescale: bool = True,
        rescale_value: float = 1 / 255,
        do_normalize: bool = True,
        image_mean=None,
        image_std=None,
        apply_ocr: bool = True,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = "",
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {'''height''': 224, '''width''': 224}
        size = get_size_dict(size)

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_value
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
        self.apply_ocr = apply_ocr
        self.ocr_lang = ocr_lang
        self.tesseract_config = tesseract_config

    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample=PILImageResampling.BILINEAR,
        data_format: Optional[ChannelDimension] = None,
        **kwargs,
    ) -> np.ndarray:
        """Resize `image` to `size` ({"height", "width"})."""
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(F"""The size dictionary must contain the keys 'height' and 'width'. Got {size.keys()}""")
        output_size = (size['''height'''], size['''width'''])
        # the bare `resize` below is the module-level transform, not this method
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def rescale(self, image: np.ndarray, scale, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """Multiply pixel values by `scale` (module-level transform)."""
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(self, image: np.ndarray, mean, std, data_format: Optional[ChannelDimension] = None, **kwargs) -> np.ndarray:
        """Normalize `image` with per-channel `mean`/`std` (module-level transform)."""
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)

    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample=None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean=None,
        image_std=None,
        apply_ocr: Optional[bool] = None,
        ocr_lang: Optional[str] = None,
        tesseract_config: Optional[str] = None,
        return_tensors=None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> "PIL.Image.Image":
        """Preprocess one or more images; returns a `BatchFeature` with
        `pixel_values` and, when OCR runs, `words`/`boxes`."""
        # Per-call overrides fall back to the configured defaults.
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size)
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        apply_ocr = apply_ocr if apply_ocr is not None else self.apply_ocr
        ocr_lang = ocr_lang if ocr_lang is not None else self.ocr_lang
        tesseract_config = tesseract_config if tesseract_config is not None else self.tesseract_config

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                '''Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '''
                '''torch.Tensor, tf.Tensor or jax.ndarray.''')

        if do_resize and size is None:
            raise ValueError('''Size must be specified if do_resize is True.''')

        if do_rescale and rescale_factor is None:
            raise ValueError('''Rescale factor must be specified if do_rescale is True.''')

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('''If do_normalize is True, image_mean and image_std must be specified.''')

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        # Tesseract OCR to get words + normalized bounding boxes
        if apply_ocr:
            requires_backends(self, '''pytesseract''')
            words_batch = []
            boxes_batch = []
            for image in images:
                # NOTE(review): the OCR helper is defined under a mangled
                # name in this file — confirm `apply_tesseract` resolves.
                words, boxes = apply_tesseract(image, ocr_lang, tesseract_config)
                words_batch.append(words)
                boxes_batch.append(boxes)

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = BatchFeature(data={'''pixel_values''': images}, tensor_type=return_tensors)

        if apply_ocr:
            data['''words'''] = words_batch
            data['''boxes'''] = boxes_batch
        return data
from __future__ import annotations
def lowerCAmelCase_ ( stress: float , tangential_force: float , area: float ) -> tuple[str, float]:
    """Solve the shear-stress relation ``stress = tangential_force / area``.

    Exactly one of the three arguments must be 0 (the unknown); it is
    computed from the other two and returned as a ``(name, value)`` pair.

    Fixes the obfuscated original, whose three parameters shared one name
    (a SyntaxError) while the body referenced the names used below.

    Raises:
        ValueError: if not exactly one value is 0, or any value is negative.
    """
    # Exactly one unknown, encoded as 0, is allowed.
    if (stress, tangential_force, area).count(0 ) != 1:
        raise ValueError('''You cannot supply more or less than 2 values''' )
    elif stress < 0:
        raise ValueError('''Stress cannot be negative''' )
    elif tangential_force < 0:
        raise ValueError('''Tangential Force cannot be negative''' )
    elif area < 0:
        raise ValueError('''Area cannot be negative''' )
    elif stress == 0:
        return (
            "stress",
            tangential_force / area,
        )
    elif tangential_force == 0:
        return (
            "tangential_force",
            stress * area,
        )
    else:
        return (
            "area",
            tangential_force / stress,
        )
if __name__ == "__main__":
    # Executed as a script: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
| 170 |
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
# Module logger; `save_vocabulary` below reports errors/warnings through it.
logger = logging.get_logger(__name__)

# File names the tokenizer expects inside a checkpoint directory.
# NOTE(review): the obfuscated original rebound a single name `_lowercase`
# for all four constants, so the tokenizer class's references to
# VOCAB_FILES_NAMES / PRETRAINED_VOCAB_FILES_MAP / the embeddings sizes
# (and `logger`) were undefined; canonical names restored.
VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
    "tokenizer_config_file": "tokenizer_config.json",
}

# Hub URLs for each pretrained checkpoint's files.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
    },
    "merges_file": {
        "facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
    },
    "tokenizer_config_file": {
        "facebook/blenderbot_small-90M": (
            "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
        )
    },
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs found in ``word``.

    ``word`` is a sequence of symbols (e.g. a tuple of variable-length
    strings).  Restored under the name the tokenizer's call sites use;
    also fixes the original's IndexError on empty input (now returns an
    empty set) and drops a redundant re-wrap in ``set``.
    """
    # zip(word, word[1:]) walks consecutive symbol pairs in a single pass.
    return set(zip(word , word[1:] ) )


# Backward-compatible alias for the obfuscated original's name.
lowerCAmelCase_ = get_pairs
class snake_case__ (A__ ):
    """BPE tokenizer for BlenderbotSmall (facebook/blenderbot_small-90M).

    NOTE(review): names restored from the upstream
    ``BlenderbotSmallTokenizer`` — the obfuscated original defined every
    method under one shared name (so all but the last were shadowed),
    bound instance state to throwaway locals instead of ``self``, and
    left locals such as ``first``/``second``/``new_word``/``index``
    unbound before use.
    """

    # Class attributes the PreTrainedTokenizer base class reads.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        """Load the vocabulary (JSON) and BPE merge table from disk."""
        super().__init__(unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , **kwargs )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            merges = merges_handle.read().split("\n" )[1:-1]
        merges = [tuple(merge.split() ) for merge in merges]
        # Merge rank table: earlier merges have lower rank (higher priority).
        self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
        # token -> BPE string cache to avoid re-encoding repeated tokens.
        self.cache = {}

    @property
    def vocab_size(self) -> int:
        """Size of the base vocabulary (excluding added tokens)."""
        return len(self.encoder )

    def get_vocab(self):
        """Full vocabulary (base + added tokens) as a token -> id dict."""
        return dict(self.encoder , **self.added_tokens_encoder )

    def bpe(self, token: str) -> str:
        """Apply byte-pair-encoding merges to a single whitespace token."""
        if token in self.cache:
            return self.cache[token]
        # Split punctuation/apostrophes off and collapse repeated spaces.
        token = re.sub("([.,!?()])" , r" \1" , token )
        token = re.sub("(')" , r" \1 " , token )
        token = re.sub(r"\s{2,}" , " " , token )
        if "\n" in token:
            token = token.replace("\n" , " __newln__" )
        tokens = token.split(" " )
        words = []
        for token in tokens:
            if not len(token ):
                continue
            token = token.lower()
            word = tuple(token )
            # Mark the final character as word-ending with "</w>".
            word = tuple(list(word[:-1] ) + [word[-1] + "</w>"] )
            pairs = get_pairs(word )
            if not pairs:
                words.append(token )
                continue
            while True:
                # Greedily apply the lowest-ranked (earliest learned) merge.
                bigram = min(pairs , key=lambda pair: self.bpe_ranks.get(pair , float("inf" ) ) )
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0
                while i < len(word ):
                    try:
                        j = word.index(first , i )
                        new_word.extend(word[i:j] )
                        i = j
                    except ValueError:
                        new_word.extend(word[i:] )
                        break
                    if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                        new_word.append(first + second )
                        i += 2
                    else:
                        new_word.append(word[i] )
                        i += 1
                word = tuple(new_word )
                if len(word ) == 1:
                    break
                else:
                    pairs = get_pairs(word )
            # Join pieces with the "@@ " continuation marker and drop "</w>".
            word = "@@ ".join(word )
            word = word[:-4]
            self.cache[token] = word
            words.append(word )
        return " ".join(words )

    def _tokenize(self, text: str):
        """Split ``text`` on whitespace and BPE-encode each piece."""
        split_tokens = []
        words = re.findall(r"\S+\n?" , text )
        for token in words:
            split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        """Map a token to its id, falling back to the unknown token's id."""
        token = token.lower()
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token(self, index: int) -> str:
        """Map an id back to its token, falling back to the unknown token."""
        return self.decoder.get(index , self.unk_token )

    def convert_tokens_to_string(self, tokens) -> str:
        """Join BPE pieces into text, removing '@@ ' continuation markers."""
        out_string = " ".join(tokens ).replace("@@ " , "" ).strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix=None):
        """Write vocab.json and merges.txt into ``save_directory``.

        Returns the two file paths, or None if the directory is invalid.
        """
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            # Merges must be written in rank order; warn on non-contiguous ranks.
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv: kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
| 170 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger for the X-MOD configuration module.
# NOTE(review): the obfuscated original rebound one name `lowerCAmelCase`
# for both the logger and the archive map, losing the first binding;
# canonical upstream names restored.
logger = logging.get_logger(__name__)

# Hub URLs of the known pretrained X-MOD configuration files.
XMOD_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "facebook/xmod-base": "https://huggingface.co/facebook/xmod-base/resolve/main/config.json",
    "facebook/xmod-large-prenorm": "https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json",
    "facebook/xmod-base-13-125k": "https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json",
    "facebook/xmod-base-30-125k": "https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json",
    "facebook/xmod-base-30-195k": "https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json",
    "facebook/xmod-base-60-125k": "https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json",
    "facebook/xmod-base-60-265k": "https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json",
    "facebook/xmod-base-75-125k": "https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json",
    "facebook/xmod-base-75-269k": "https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json",
}
class XmodConfig ( UpperCAmelCase_ ):
    """Configuration class for X-MOD models (``model_type = "xmod"``).

    NOTE(review): restored from the upstream ``XmodConfig`` — the
    obfuscated original repeated one parameter name throughout
    ``__init__`` (a SyntaxError) and assigned every value to a throwaway
    local instead of an instance attribute. The class was also named
    identically to the ONNX config below it, so one shadowed the other.
    """

    model_type = "xmod"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        pre_norm=False,
        adapter_reduction_factor=2,
        adapter_layer_norm=False,
        adapter_reuse_layer_norm=True,
        ln_before_adapter=True,
        languages=("en_XX",),
        default_language=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        # Standard transformer hyper-parameters.
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        # X-MOD-specific: pre-norm layout and per-language adapter settings.
        self.pre_norm = pre_norm
        self.adapter_reduction_factor = adapter_reduction_factor
        self.adapter_layer_norm = adapter_layer_norm
        self.adapter_reuse_layer_norm = adapter_reuse_layer_norm
        self.ln_before_adapter = ln_before_adapter
        self.languages = list(languages)
        self.default_language = default_language
class XmodOnnxConfig ( UpperCAmelCase_ ):
    """ONNX export configuration for X-MOD.

    NOTE(review): the obfuscated original assigned the axis mapping to a
    throwaway local while the return statement read the undefined name
    ``dynamic_axis``; the property is restored under the ``inputs`` name
    the OnnxConfig machinery looks up, and the class is renamed so it no
    longer shadows the model configuration class above.
    """

    @property
    def inputs(self):
        """Dynamic-axis names for the exported inputs (extra choice axis for multiple-choice)."""
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
| 369 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_convbert import ConvBertTokenizer
# Module logger.
# NOTE(review): the obfuscated original rebound one name `lowerCAmelCase`
# for all five constants, so the tokenizer class's references to
# VOCAB_FILES_NAMES etc. were undefined; canonical names restored.
logger = logging.get_logger(__name__)

# File names expected inside a ConvBERT checkpoint directory.
VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt"}

# Hub URLs for each pretrained checkpoint's vocabulary.
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "YituTech/conv-bert-base": "https://huggingface.co/YituTech/conv-bert-base/resolve/main/vocab.txt",
        "YituTech/conv-bert-medium-small": (
            "https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/vocab.txt"
        ),
        "YituTech/conv-bert-small": "https://huggingface.co/YituTech/conv-bert-small/resolve/main/vocab.txt",
    }
}

# Maximum sequence length per checkpoint.
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "YituTech/conv-bert-base": 512,
    "YituTech/conv-bert-medium-small": 512,
    "YituTech/conv-bert-small": 512,
}

# Per-checkpoint tokenizer init overrides.
PRETRAINED_INIT_CONFIGURATION = {
    "YituTech/conv-bert-base": {"do_lower_case": True},
    "YituTech/conv-bert-medium-small": {"do_lower_case": True},
    "YituTech/conv-bert-small": {"do_lower_case": True},
}
class __lowercase ( UpperCAmelCase_ ):
    """Fast (Rust-backed) ConvBERT tokenizer based on WordPiece.

    NOTE(review): parameter and method names restored from the upstream
    ``ConvBertTokenizerFast`` — the obfuscated original repeated one
    parameter name per signature (a SyntaxError), used ``token_ids_a``
    for both sequences, never bound ``normalizer_state``, and defined
    all methods under one shared name so only the last survived.
    """

    # Class attributes the PreTrainedTokenizerFast base class reads.
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = ConvBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )
        # Rebuild the backend normalizer if its options disagree with ours.
        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)
        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        """[CLS] A [SEP] (+ B [SEP] when a second sequence is given)."""
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        """Token-type ids: 0 over [CLS] A [SEP], 1 over B [SEP]."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        """Persist the backend vocabulary files and return their paths."""
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 127 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
# Emit INFO-level progress messages while the conversion script runs.
logging.set_verbosity_info()
# NOTE(review): upstream binds this logger as `logger`; it is unused in
# the visible chunk, so the name is left as-is.
lowercase__ = logging.get_logger(__name__)
def convert_classification(base_model_name, hf_config, downstream_dict):
    """Build a Wav2Vec2 sequence-classification model and load the s3prl head.

    NOTE(review): name and weight-assignment targets restored from the
    upstream s3prl conversion script — the obfuscated original bound each
    tensor to a throwaway local, and the dispatcher below called this
    function by a name that no longer existed.
    """
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name, hf_config, downstream_dict):
    """Build a Wav2Vec2 audio-frame-classification model and load the s3prl head.

    NOTE(review): name and weight targets restored from the upstream
    conversion script (see convert_classification).
    """
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name, config=hf_config)
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name, hf_config, downstream_dict):
    """Build a Wav2Vec2 x-vector model and load the s3prl speaker-verification head.

    NOTE(review): name and weight targets restored from the upstream
    conversion script (see convert_classification).
    """
    model = WavaVecaForXVector.from_pretrained(base_model_name, config=hf_config)
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    # One TDNN layer per configured kernel size.
    for i, kernel_size in enumerate(hf_config.tdnn_kernel):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name, config_path, checkpoint_path, model_dump_path):
    """Convert an s3prl downstream checkpoint into a HF Wav2Vec2 model + feature extractor.

    Dispatches on the architecture named in the config and saves both the
    converted model and its feature extractor to ``model_dump_path``.
    NOTE(review): function name restored to match the ``__main__`` call
    site; the layer-weights assignment target is restored from upstream.
    """
    checkpoint = torch.load(checkpoint_path, map_location="cpu")
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path)
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name, return_attention_mask=True, do_normalize=False
    )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification"):
        hf_model = convert_classification(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForAudioFrameClassification"):
        hf_model = convert_diarization(base_model_name, hf_config, downstream_dict)
    elif arch.endswith("ForXVector"):
        hf_model = convert_xvector(base_model_name, hf_config, downstream_dict)
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''')
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path)
    hf_model.save_pretrained(model_dump_path)
if __name__ == "__main__":
    # CLI entry point: parse the four required paths/names and convert.
    # NOTE(review): the obfuscated original bound the parser to one name
    # but called methods on the undefined names `parser`/`args`; restored.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        """--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
    )
    parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
    parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
    parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
    convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
| 241 |
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester ( A__ ):
    '''Builds tiny DeBERTa configs/inputs and checks each model head's output shapes.

    NOTE(review): restored from the upstream transformers test suite —
    the obfuscated original defined all methods under one shared name,
    never assigned the constructor arguments to ``self``, and dropped
    model outputs into unbound locals. The class is renamed to match the
    ``DebertaModelTester(self)`` call site in the test class below.
    '''

    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        relative_attention=False,
        position_biased_input=True,
        pos_att_type="None",
        num_labels=3,
        num_choices=4,
        scope=None,
    ):
        # Store every knob on self; the create_and_check_* methods read them.
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope

    def prepare_config_and_inputs(self):
        """Random ids/masks/labels plus a config, sized by the tester's knobs."""
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels

    def get_config(self):
        """A DebertaConfig mirroring the tester's hyper-parameters."""
        return DebertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            relative_attention=self.relative_attention,
            position_biased_input=self.position_biased_input,
            pos_att_type=self.pos_att_type,
        )

    def get_pipeline_config(self):
        """Config variant with a larger vocabulary, used by pipeline tests."""
        config = self.get_config()
        config.vocab_size = 300
        return config

    def check_loss_output(self, result):
        """The loss must be a scalar (empty shape)."""
        self.parent.assertListEqual(list(result.loss.size()), [])

    def create_and_check_deberta_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        # Exercise the three supported call signatures.
        sequence_output = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids, token_type_ids=token_type_ids)[0]
        sequence_output = model(input_ids)[0]
        self.parent.assertListEqual(list(sequence_output.size()), [self.batch_size, self.seq_length, self.hidden_size])

    def create_and_check_deberta_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_deberta_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=sequence_labels)
        self.parent.assertListEqual(list(result.logits.size()), [self.batch_size, self.num_labels])
        self.check_loss_output(result)

    def create_and_check_deberta_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_deberta_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids,
            attention_mask=input_mask,
            token_type_ids=token_type_ids,
            start_positions=sequence_labels,
            end_positions=sequence_labels,
        )
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))

    def prepare_config_and_inputs_for_common(self):
        """Config plus a kwargs dict in the shape the common test mixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest ( A__ , A__ , unittest.TestCase ):
    '''Standard-suite model tests for DeBERTa.

    NOTE(review): class/attribute/method names restored from the upstream
    transformers test file — the obfuscated original bound every class
    attribute to the single name ``a_`` and every method to
    ``lowerCamelCase``, so unittest never discovered ``setUp`` or any
    ``test_*`` method.
    '''

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": DebertaModel,
            "fill-mask": DebertaForMaskedLM,
            "question-answering": DebertaForQuestionAnswering,
            "text-classification": DebertaForSequenceClassification,
            "token-classification": DebertaForTokenClassification,
            "zero-shot": DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # Flags consumed by the common test mixin (order matches the original
    # True/False/False/False/False values; names per upstream — confirm).
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest ( unittest.TestCase ):
    '''Slow integration checks against the public microsoft/deberta-base weights.

    NOTE(review): method names and local bindings restored from the
    upstream test file; the obfuscated original's two methods shared one
    name and dropped tensors into unbound locals.
    '''

    @unittest.skip(reason="Model not available yet")
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained("microsoft/deberta-base")
        input_ids = torch.tensor([[0, 3_14_14, 2_32, 3_28, 7_40, 11_40, 1_26_95, 69, 4_60_78, 15_88, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1e-4), f'''{output[:, 1:4, 1:4]}''')
| 241 | 1 |
from collections.abc import Callable
import numpy as np
def UpperCAmelCase(ode_func: Callable, y0: float, x0: float, step_size: float, x_end: float) -> np.ndarray:
    """Integrate ``y' = ode_func(x, y)`` with the explicit (forward) Euler method.

    Fixes the obfuscated original, whose five parameters shared one name
    (a SyntaxError) and whose updates to ``y[0]``, ``y[k + 1]`` and ``x``
    were bound to a throwaway local.

    Args:
        ode_func: right-hand side f(x, y) of the ODE.
        y0: initial value y(x0).
        x0: initial abscissa.
        step_size: fixed integration step h.
        x_end: end of the integration interval.

    Returns:
        Array of the n + 1 approximated y values, with n = ceil((x_end - x0) / h).
    """
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0] = y0
    x = x0
    for k in range(n):
        # Forward Euler update: y_{k+1} = y_k + h * f(x_k, y_k).
        y[k + 1] = y[k] + step_size * ode_func(x, y[k])
        x += step_size
    return y
if __name__ == "__main__":
    # Executed as a script: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
def depth_first_search(grid, row, col, visit):
    """Count simple 4-directional paths from (row, col) to the bottom-right cell.

    ``grid`` cells equal to 1 are walls; ``visit`` tracks the cells on the
    current path so no cell repeats. Restored under the name its own
    recursive calls use — the obfuscated original's definition name did
    not match them, and its four parameters shared one name (SyntaxError).

    Returns:
        Number of distinct acyclic paths reaching grid[-1][-1].
    """
    row_length, col_length = len(grid), len(grid[0])
    # Out of bounds, already on the current path, or blocked: dead end.
    if (
        min(row, col) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1

    visit.add((row, col))
    count = 0
    count += depth_first_search(grid, row + 1, col, visit)
    count += depth_first_search(grid, row - 1, col, visit)
    count += depth_first_search(grid, row, col + 1, visit)
    count += depth_first_search(grid, row, col - 1, visit)
    # Backtrack so sibling branches may reuse this cell.
    visit.remove((row, col))
    return count


# Backward-compatible alias for the obfuscated original's name.
UpperCAmelCase = depth_first_search
if __name__ == "__main__":
    # Executed as a script: run any doctest examples embedded in this module.
    import doctest
    doctest.testmod()
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class _SCREAMING_SNAKE_CASE(unittest.TestCase):
    """Unit tests for ``CLIPSegProcessor``.

    Builds a tiny CLIP tokenizer (vocab + BPE merges) and a ViT image-processor
    config in a temporary directory, then exercises processor save/load,
    text-only, image-only, combined, visual-prompt, and batch-decode paths.

    NOTE(review): the obfuscated original defined every method as ``_snake_case``
    (so later definitions shadowed earlier ones and unittest never found
    ``setUp``/``tearDown``) and referenced undefined names such as
    ``__UpperCAmelCase`` and ``np.uinta``; real names are restored here.
    """

    def setUp(self):
        """Write a minimal tokenizer vocab/merges and image-processor JSON to a temp dir."""
        self.tmpdirname = tempfile.mkdtemp()

        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["merges_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as fp:
            fp.write(json.dumps(vocab_tokens) + "\n")
        with open(self.merges_file, "w", encoding="utf-8") as fp:
            fp.write("\n".join(merges))

        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        """Slow tokenizer loaded from the temp dir."""
        return CLIPTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        """Fast (Rust) tokenizer loaded from the temp dir."""
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        """Image processor loaded from the temp dir."""
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        """Remove the temp dir created by setUp."""
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Return a list with one random PIL image (channels moved to the last axis)."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs

    def test_save_load_pretrained_default(self):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()

        processor_slow = CLIPSegProcessor(tokenizer=tokenizer_slow, image_processor=image_processor)
        processor_slow.save_pretrained(self.tmpdirname)
        processor_slow = CLIPSegProcessor.from_pretrained(self.tmpdirname, use_fast=False)

        processor_fast = CLIPSegProcessor(tokenizer=tokenizer_fast, image_processor=image_processor)
        processor_fast.save_pretrained(self.tmpdirname)
        processor_fast = CLIPSegProcessor.from_pretrained(self.tmpdirname)

        self.assertEqual(processor_slow.tokenizer.get_vocab(), tokenizer_slow.get_vocab())
        self.assertEqual(processor_fast.tokenizer.get_vocab(), tokenizer_fast.get_vocab())
        self.assertEqual(tokenizer_slow.get_vocab(), tokenizer_fast.get_vocab())
        self.assertIsInstance(processor_slow.tokenizer, CLIPTokenizer)
        self.assertIsInstance(processor_fast.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor_slow.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertEqual(processor_fast.image_processor.to_json_string(), image_processor.to_json_string())
        self.assertIsInstance(processor_slow.image_processor, ViTImageProcessor)
        self.assertIsInstance(processor_fast.image_processor, ViTImageProcessor)

    def test_save_load_pretrained_additional_features(self):
        processor = CLIPSegProcessor(tokenizer=self.get_tokenizer(), image_processor=self.get_image_processor())
        processor.save_pretrained(self.tmpdirname)

        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False, padding_value=1.0)

        processor = CLIPSegProcessor.from_pretrained(
            self.tmpdirname, bos_token="(BOS)", eos_token="(EOS)", do_normalize=False, padding_value=1.0
        )

        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
        self.assertIsInstance(processor.tokenizer, CLIPTokenizerFast)

        self.assertEqual(processor.image_processor.to_json_string(), image_processor_add_kwargs.to_json_string())
        self.assertIsInstance(processor.image_processor, ViTImageProcessor)

    def test_image_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        input_feat_extract = image_processor(image_input, return_tensors="np")
        input_processor = processor(images=image_input, return_tensors="np")

        # Processor must delegate to the image processor unchanged (tolerate float noise).
        for key in input_feat_extract.keys():
            self.assertAlmostEqual(input_feat_extract[key].sum(), input_processor[key].sum(), delta=1e-2)

    def test_tokenizer(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        encoded_processor = processor(text=input_str)
        encoded_tok = tokenizer(input_str)

        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key])

    def test_processor(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str, images=image_input)

        self.assertListEqual(list(inputs.keys()), ["input_ids", "attention_mask", "pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_visual_prompt(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        image_input = self.prepare_image_inputs()
        visual_prompt_input = self.prepare_image_inputs()
        inputs = processor(images=image_input, visual_prompt=visual_prompt_input)

        self.assertListEqual(list(inputs.keys()), ["pixel_values", "conditional_pixel_values"])

        # test if it raises when no input is passed
        with pytest.raises(ValueError):
            processor()

    def test_tokenizer_decode(self):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPSegProcessor(tokenizer=tokenizer, image_processor=image_processor)

        predicted_ids = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]

        decoded_processor = processor.batch_decode(predicted_ids)
        decoded_tok = tokenizer.batch_decode(predicted_ids)

        self.assertListEqual(decoded_tok, decoded_processor)
| 154 |
'''simple docstring'''
def partition(m: int) -> int:
    """Return the number of integer partitions of ``m``.

    Uses the classic DP where ``memo[n][k]`` accumulates the number of ways to
    write ``n`` as a sum of parts bounded by the column index.

    >>> [partition(i) for i in (1, 2, 3, 4, 5)]
    [1, 2, 3, 5, 7]
    """
    # The empty sum is the single partition of 0 (the DP table below would
    # have zero columns for m == 0, so handle it up front).
    if m == 0:
        return 1

    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    # Base case: one way to partition any n using the smallest part size.
    for i in range(m + 1):
        memo[i][0] = 1

    for n in range(m + 1):
        for k in range(1, m):
            # Partitions not using part k, plus those that do use it.
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]

    return memo[m][m - 1]


# Backward-compatible alias for the original (obfuscated) function name.
lowercase_ = partition
if __name__ == "__main__":
    import sys

    # Read the target number either interactively or from argv[1]; the original
    # converted the input but then called partition(n) with `n` undefined.
    if len(sys.argv) == 1:
        try:
            n = int(input('Enter a number: ').strip())
            print(lowercase_(n))  # module's partition function (defined above)
        except ValueError:
            print('Please enter a number.')
    else:
        try:
            n = int(sys.argv[1])
            print(lowercase_(n))
        except ValueError:
            print('Please pass a number.')
| 254 | 0 |
import warnings
from ...utils import logging
from .image_processing_poolformer import PoolFormerImageProcessor
_snake_case = logging.get_logger(__name__)
class UpperCAmelCase_(PoolFormerImageProcessor):
    """Deprecated feature-extractor shim.

    Thin subclass of ``PoolFormerImageProcessor`` kept only for backward
    compatibility; it emits a ``FutureWarning`` and forwards all arguments.
    NOTE(review): the obfuscated original inherited from the undefined name
    ``_lowerCAmelCase`` and passed the undefined ``SCREAMING_SNAKE_CASE_`` as
    the warning category; both restored here.
    """

    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class PoolFormerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use PoolFormerImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 366 |
from __future__ import annotations
from typing import Any
class UpperCAmelCase_:
    """A simple dense ``row x column`` matrix over numbers.

    Supports indexing with ``m[r, c]``, addition, negation, subtraction,
    scalar and matrix multiplication, transposition, and the Sherman-Morrison
    rank-one inverse update.
    """

    def __init__(self, row: int, column: int, default_value: float = 0) -> None:
        """Create a row x column matrix filled with ``default_value``."""
        self.row, self.column = row, column
        self.array = [[default_value for c in range(column)] for r in range(row)]

    def __str__(self) -> str:
        """Pretty-print with all entries right-aligned to the widest one."""
        s = f"Matrix consist of {self.row} rows and {self.column} columns\n"

        # Make string identifier: width of the longest rendered entry.
        max_element_length = 0
        for row_vector in self.array:
            for obj in row_vector:
                max_element_length = max(max_element_length, len(str(obj)))
        string_format_identifier = f"%{max_element_length}s"

        # Make string and return
        def single_line(row_vector) -> str:
            nonlocal string_format_identifier
            line = "["
            line += ", ".join(string_format_identifier % (obj,) for obj in row_vector)
            line += "]"
            return line

        s += "\n".join(single_line(row_vector) for row_vector in self.array)
        return s

    def __repr__(self) -> str:
        return str(self)

    def validate_indicies(self, loc) -> bool:
        """Return True iff ``loc`` is a valid (row, column) pair.

        (Name keeps the project's original misspelling for compatibility.)
        """
        if not (isinstance(loc, (list, tuple)) and len(loc) == 2):
            return False
        if not (0 <= loc[0] < self.row and 0 <= loc[1] < self.column):
            return False
        return True

    def __getitem__(self, loc):
        assert self.validate_indicies(loc)
        return self.array[loc[0]][loc[1]]

    def __setitem__(self, loc, value) -> None:
        assert self.validate_indicies(loc)
        self.array[loc[0]][loc[1]] = value

    def __add__(self, another):
        assert isinstance(another, UpperCAmelCase_)
        assert self.row == another.row and self.column == another.column
        # Add element-wise into a fresh matrix.
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = self[r, c] + another[r, c]
        return result

    def __neg__(self):
        result = UpperCAmelCase_(self.row, self.column)
        for r in range(self.row):
            for c in range(self.column):
                result[r, c] = -self[r, c]
        return result

    def __sub__(self, another):
        return self + (-another)

    def __mul__(self, another):
        if isinstance(another, (int, float)):  # Scalar multiplication
            result = UpperCAmelCase_(self.row, self.column)
            for r in range(self.row):
                for c in range(self.column):
                    result[r, c] = self[r, c] * another
            return result
        elif isinstance(another, UpperCAmelCase_):  # Matrix multiplication
            assert self.column == another.row
            result = UpperCAmelCase_(self.row, another.column)
            for r in range(self.row):
                for c in range(another.column):
                    for i in range(self.column):
                        result[r, c] += self[r, i] * another[i, c]
            return result
        else:
            raise TypeError(f"Unsupported type given for another ({type(another)})")

    def transpose(self):
        """Return a new matrix with rows and columns swapped."""
        result = UpperCAmelCase_(self.column, self.row)
        for r in range(self.row):
            for c in range(self.column):
                result[c, r] = self[r, c]
        return result

    def sherman_morrison(self, u, v):
        """Return ``(A + u v^T)^-1`` where ``self`` is ``A^-1``.

        ``u`` and ``v`` must be column vectors of matching dimension.
        Returns ``None`` when ``1 + v^T A^-1 u == 0`` (update not invertible).
        """
        assert isinstance(u, UpperCAmelCase_) and isinstance(v, UpperCAmelCase_)
        assert self.row == self.column == u.row == v.row  # u, v should be column vector
        assert u.column == v.column == 1  # u, v should be column vector

        # Calculate
        v_t = v.transpose()
        numerator_factor = (v_t * self * u)[0, 0] + 1
        if numerator_factor == 0:
            return None  # It's not invertable
        return self - ((self * u) * (v_t * self) * (1.0 / numerator_factor))


# Backward-compatible alias used by the demo code at the bottom of the file.
Matrix = UpperCAmelCase_
# Testing
if __name__ == "__main__":

    def test1() -> None:
        """Demonstrate the Sherman-Morrison update on a 3x3 identity matrix."""
        # a^-1: start from the identity (A = I so A^-1 = I).
        ainv = UpperCAmelCase_(3, 3, 0)
        for i in range(3):
            ainv[i, i] = 1
        print(f"a^(-1) is {ainv}")
        # u, v
        u = UpperCAmelCase_(3, 1, 0)
        u[0, 0], u[1, 0], u[2, 0] = 1, 2, -3
        v = UpperCAmelCase_(3, 1, 0)
        v[0, 0], v[1, 0], v[2, 0] = 4, -2, 5
        print(f"u is {u}")
        print(f"v is {v}")
        print(f"uv^T is {u * v.transpose()}")
        # Sherman Morrison
        print(f"(a + uv^T)^(-1) is {ainv.sherman_morrison(u, v)}")

    def test2() -> None:
        """Run the module's doctests."""
        import doctest

        doctest.testmod()

    test1()
| 300 | 0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.