from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    "configuration_mobilevit": ["MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MobileViTConfig", "MobileViTOnnxConfig"],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_mobilevit"] = ["MobileViTFeatureExtractor"]
    _import_structure["image_processing_mobilevit"] = ["MobileViTImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mobilevit"] = [
'''MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileViTForImageClassification''',
'''MobileViTForSemanticSegmentation''',
'''MobileViTModel''',
'''MobileViTPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mobilevit"] = [
'''TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileViTForImageClassification''',
'''TFMobileViTForSemanticSegmentation''',
'''TFMobileViTModel''',
'''TFMobileViTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
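# Usage sketch (illustrative; assumes this file is transformers/models/mobilevit/__init__.py):
# _LazyModule defers the heavy backend imports until an attribute is first accessed, e.g.
#
#     from transformers.models.mobilevit import MobileViTConfig  # no torch/TF import yet
#     from transformers.models.mobilevit import MobileViTModel   # imports the torch-backed module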
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet"] = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_fnet_fast"] = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import T5FilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

TARGET_FEATURE_LENGTH = 256
class SpectrogramDiffusionPipeline(DiffusionPipeline):
    _optional_components = ["melgan"]

    def __init__(
        self,
        notes_encoder: SpectrogramNotesEncoder,
        continuous_encoder: SpectrogramContEncoder,
        decoder: T5FilmDecoder,
        scheduler: DDPMScheduler,
        melgan: OnnxRuntimeModel if is_onnx_available() else Any,
    ) -> None:
        super().__init__()

        # From MELGAN
        self.min_value = math.log(1e-5)  # Matches MelGAN training.
        self.max_value = 4.0  # Largest value for most examples
        self.n_dims = 128

        self.register_modules(
            notes_encoder=notes_encoder,
            continuous_encoder=continuous_encoder,
            decoder=decoder,
            scheduler=scheduler,
            melgan=melgan,
        )
    def scale_features(self, features, output_range=(-1.0, 1.0), clip=False):
        """Linearly scale features to network outputs range."""
        min_out, max_out = output_range
        if clip:
            features = torch.clip(features, self.min_value, self.max_value)
        # Scale to [0, 1].
        zero_one = (features - self.min_value) / (self.max_value - self.min_value)
        # Scale to [min_out, max_out].
        return zero_one * (max_out - min_out) + min_out
    def scale_to_features(self, outputs, input_range=(-1.0, 1.0), clip=False):
        """Invert by linearly scaling network outputs to features range."""
        min_out, max_out = input_range
        outputs = torch.clip(outputs, min_out, max_out) if clip else outputs
        # Scale to [0, 1].
        zero_one = (outputs - min_out) / (max_out - min_out)
        # Scale to [self.min_value, self.max_value].
        return zero_one * (self.max_value - self.min_value) + self.min_value
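    # Sanity check (illustrative): scale_to_features inverts scale_features, so a feature
    # equal to max_value (4.0) maps to 1.0 in [-1.0, 1.0] and back to 4.0, up to float error.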
    def encode(self, input_tokens, continuous_inputs, continuous_mask):
        tokens_mask = input_tokens > 0
        tokens_encoded, tokens_mask = self.notes_encoder(
            encoder_input_tokens=input_tokens, encoder_inputs_mask=tokens_mask
        )
        continuous_encoded, continuous_mask = self.continuous_encoder(
            encoder_inputs=continuous_inputs, encoder_inputs_mask=continuous_mask
        )
        return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
    def decode(self, encodings_and_masks, input_tokens, noise_time):
        timesteps = noise_time
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=input_tokens.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(input_tokens.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(input_tokens.shape[0], dtype=timesteps.dtype, device=timesteps.device)

        logits = self.decoder(
            encodings_and_masks=encodings_and_masks, decoder_input_tokens=input_tokens, decoder_noise_time=timesteps
        )
        return logits
@torch.no_grad()
    def __call__(
        self,
        input_tokens: List[List[int]],
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 100,
        return_dict: bool = True,
        output_type: str = "numpy",
        callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
        callback_steps: int = 1,
    ) -> Union[AudioPipelineOutput, Tuple]:
        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        pred_mel = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims], dtype=np.float32)
        full_pred_mel = np.zeros([1, 0, self.n_dims], np.float32)
        ones = torch.ones((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)

        for i, encoder_input_tokens in enumerate(input_tokens):
            if i == 0:
                encoder_continuous_inputs = torch.from_numpy(pred_mel[:1].copy()).to(
                    device=self.device, dtype=self.decoder.dtype
                )
                # The first chunk has no previous context.
                encoder_continuous_mask = torch.zeros((1, TARGET_FEATURE_LENGTH), dtype=bool, device=self.device)
            else:
                # The full song pipeline does not feed in a context feature, so the mask
                # will be all 0s after the feature converter. Because we know we're
                # feeding in a full context chunk from the previous prediction, set it
                # to all 1s.
                encoder_continuous_mask = ones

            encoder_continuous_inputs = self.scale_features(
                encoder_continuous_inputs, output_range=[-1.0, 1.0], clip=True
            )

            encodings_and_masks = self.encode(
                input_tokens=torch.IntTensor([encoder_input_tokens]).to(device=self.device),
                continuous_inputs=encoder_continuous_inputs,
                continuous_mask=encoder_continuous_mask,
            )

            # Sample encoder_continuous_inputs shaped gaussian noise to begin loop
            x = randn_tensor(
                shape=encoder_continuous_inputs.shape,
                generator=generator,
                device=self.device,
                dtype=self.decoder.dtype,
            )

            # set step values
            self.scheduler.set_timesteps(num_inference_steps)

            # Denoising diffusion loop
            for j, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
                output = self.decode(
                    encodings_and_masks=encodings_and_masks,
                    input_tokens=x,
                    noise_time=t / self.scheduler.config.num_train_timesteps,
                )

                # Compute previous output: x_t -> x_t-1
                x = self.scheduler.step(output, t, x, generator=generator).prev_sample

            mel = self.scale_to_features(x, input_range=[-1.0, 1.0])
            encoder_continuous_inputs = mel[:1]
            pred_mel = mel.cpu().float().numpy()

            full_pred_mel = np.concatenate([full_pred_mel, pred_mel[:1]], axis=1)

            # call the callback, if provided
            if callback is not None and i % callback_steps == 0:
                callback(i, full_pred_mel)

            logger.info("Generated segment", i)

        if output_type == "numpy" and not is_onnx_available():
            raise ValueError(
                "Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'."
            )
        elif output_type == "numpy" and self.melgan is None:
            raise ValueError(
                "Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'."
            )

        if output_type == "numpy":
            output = self.melgan(input_features=full_pred_mel.astype(np.float32))
        else:
            output = full_pred_mel

        if not return_dict:
            return (output,)

        return AudioPipelineOutput(audios=output)
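# Usage sketch (illustrative; the checkpoint name is an assumption):
#
#     pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
#     output = pipe(input_tokens, num_inference_steps=100, output_type="numpy")
#     audio = output.audios[0]  # "numpy" output requires the optional MelGAN ONNX component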
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert import BertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-ctx_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-ctx_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-question_encoder-single-nq-base": (
"https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-question_encoder-multiset-base": (
"https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json"
),
},
}
READER_PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"facebook/dpr-reader-single-nq-base": (
"https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json"
),
"facebook/dpr-reader-multiset-base": (
"https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json"
),
},
}
CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-ctx_encoder-single-nq-base": 512,
"facebook/dpr-ctx_encoder-multiset-base": 512,
}
QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-question_encoder-single-nq-base": 512,
"facebook/dpr-question_encoder-multiset-base": 512,
}
READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"facebook/dpr-reader-single-nq-base": 512,
"facebook/dpr-reader-multiset-base": 512,
}
CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-ctx_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-ctx_encoder-multiset-base": {"do_lower_case": True},
}
QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-question_encoder-single-nq-base": {"do_lower_case": True},
"facebook/dpr-question_encoder-multiset-base": {"do_lower_case": True},
}
READER_PRETRAINED_INIT_CONFIGURATION = {
"facebook/dpr-reader-single-nq-base": {"do_lower_case": True},
"facebook/dpr-reader-multiset-base": {"do_lower_case": True},
}
class DPRContextEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRContextEncoder tokenizer (backed by [`BertTokenizer`]).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
class DPRQuestionEncoderTokenizer(BertTokenizer):
    r"""
    Construct a DPRQuestionEncoder tokenizer (backed by [`BertTokenizer`]).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
DPRSpanPrediction = collections.namedtuple(
    "DPRSpanPrediction", ["span_score", "relevance_score", "doc_id", "start_index", "end_index", "text"]
)

DPRReaderOutput = collections.namedtuple("DPRReaderOutput", ["start_logits", "end_logits", "relevance_logits"])
__a = R"\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n ```\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n ```\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `'longest'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `'max_length'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `'do_not_pad'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `'longest_first'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `'only_first'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `'only_second'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `'do_not_truncate'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. 
Acceptable values are:\n\n - `'tf'`: Return TensorFlow `tf.constant` objects.\n - `'pt'`: Return PyTorch `torch.Tensor` objects.\n - `'np'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer's default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Returns:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n "
@add_start_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class CustomDPRReaderTokenizerMixin:
    def __call__(
        self,
        questions,
        titles: Optional[str] = None,
        texts: Optional[str] = None,
        padding: Union[bool, str] = False,
        truncation: Union[bool, str] = False,
        max_length: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        **kwargs,
    ) -> BatchEncoding:
        if titles is None and texts is None:
            return super().__call__(
                questions,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        elif titles is None or texts is None:
            text_pair = titles if texts is None else texts
            return super().__call__(
                questions,
                text_pair,
                padding=padding,
                truncation=truncation,
                max_length=max_length,
                return_tensors=return_tensors,
                return_attention_mask=return_attention_mask,
                **kwargs,
            )
        titles = titles if not isinstance(titles, str) else [titles]
        texts = texts if not isinstance(texts, str) else [texts]
        n_passages = len(titles)
        questions = questions if not isinstance(questions, str) else [questions] * n_passages
        if len(titles) != len(texts):
            raise ValueError(
                f"There should be as many titles than texts but got {len(titles)} titles and {len(texts)} texts."
            )
        encoded_question_and_titles = super().__call__(questions, titles, padding=False, truncation=False)["input_ids"]
        encoded_texts = super().__call__(texts, add_special_tokens=False, padding=False, truncation=False)["input_ids"]
        encoded_inputs = {
            "input_ids": [
                (encoded_question_and_title + encoded_text)[:max_length]
                if max_length is not None and truncation
                else encoded_question_and_title + encoded_text
                for encoded_question_and_title, encoded_text in zip(encoded_question_and_titles, encoded_texts)
            ]
        }
        if return_attention_mask is not False:
            attention_mask = []
            for input_ids in encoded_inputs["input_ids"]:
                attention_mask.append([int(input_id != self.pad_token_id) for input_id in input_ids])
            encoded_inputs["attention_mask"] = attention_mask
        return self.pad(encoded_inputs, padding=padding, max_length=max_length, return_tensors=return_tensors)
    def decode_best_spans(
        self,
        reader_input: BatchEncoding,
        reader_output: DPRReaderOutput,
        num_spans: int = 16,
        max_answer_length: int = 64,
        num_spans_per_passage: int = 4,
    ) -> List[DPRSpanPrediction]:
        """
        Get the span predictions for the extractive Q&A model, sorted by descending
        *(relevance_score, span_score)*.
        """
        input_ids = reader_input["input_ids"]
        start_logits, end_logits, relevance_logits = reader_output[:3]
        n_passages = len(relevance_logits)
        sorted_docs = sorted(range(n_passages), reverse=True, key=relevance_logits.__getitem__)
        nbest_spans_predictions: List[DPRSpanPrediction] = []
        for doc_id in sorted_docs:
            sequence_ids = list(input_ids[doc_id])
            # assuming question & title information is at the beginning of the sequence
            passage_offset = sequence_ids.index(self.sep_token_id, 2) + 1  # second sep id
            if sequence_ids[-1] == self.pad_token_id:
                sequence_len = sequence_ids.index(self.pad_token_id)
            else:
                sequence_len = len(sequence_ids)

            best_spans = self._get_best_spans(
                start_logits=start_logits[doc_id][passage_offset:sequence_len],
                end_logits=end_logits[doc_id][passage_offset:sequence_len],
                max_answer_length=max_answer_length,
                top_spans=num_spans_per_passage,
            )
            for start_index, end_index in best_spans:
                start_index += passage_offset
                end_index += passage_offset
                nbest_spans_predictions.append(
                    DPRSpanPrediction(
                        span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index],
                        relevance_score=relevance_logits[doc_id],
                        doc_id=doc_id,
                        start_index=start_index,
                        end_index=end_index,
                        text=self.decode(sequence_ids[start_index : end_index + 1]),
                    )
                )
            if len(nbest_spans_predictions) >= num_spans:
                break
        return nbest_spans_predictions[:num_spans]
    def _get_best_spans(
        self,
        start_logits: List[int],
        end_logits: List[int],
        max_answer_length: int,
        top_spans: int,
    ) -> List[DPRSpanPrediction]:
        """
        Finds the best answer spans for the extractive Q&A model for one passage. It returns the best spans by
        descending `span_score` order and keeps at most `top_spans` spans. Spans longer than `max_answer_length`
        are ignored.
        """
        scores = []
        for start_index, start_score in enumerate(start_logits):
            for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length]):
                scores.append(((start_index, start_index + answer_length), start_score + end_score))
        scores = sorted(scores, key=lambda x: x[1], reverse=True)
        chosen_span_intervals = []
        for (start_index, end_index), score in scores:
            if start_index > end_index:
                raise ValueError(f"Wrong span indices: [{start_index}:{end_index}]")
            length = end_index - start_index + 1
            if length > max_answer_length:
                raise ValueError(f"Span is too long: {length} > {max_answer_length}")
            if any(
                start_index <= prev_start_index <= prev_end_index <= end_index
                or prev_start_index <= start_index <= end_index <= prev_end_index
                for (prev_start_index, prev_end_index) in chosen_span_intervals
            ):
                continue
            chosen_span_intervals.append((start_index, end_index))
            if len(chosen_span_intervals) == top_spans:
                break
        return chosen_span_intervals
@add_end_docstrings(CUSTOM_DPR_READER_DOCSTRING)
class DPRReaderTokenizer(CustomDPRReaderTokenizerMixin, BertTokenizer):
    r"""
    Construct a DPRReader tokenizer (backed by [`BertTokenizer`]).
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = READER_PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = READER_PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNet2DModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION["lanczos"])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class LDMSuperResolutionPipeline(DiffusionPipeline):
    r"""
    A pipeline for image super-resolution using latent diffusion.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNet2DModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ) -> Union[Tuple, ImagePipelineOutput]:
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f"`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}")

        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)

        height, width = image.shape[-2:]

        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype

        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)

        image = image.to(device=self.device, dtype=latents_dtype)

        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
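# Usage sketch (illustrative; the checkpoint name is an assumption):
#
#     pipeline = LDMSuperResolutionPipeline.from_pretrained("CompVis/ldm-super-resolution-4x-openimages")
#     low_res_img = PIL.Image.open("low_res.png").convert("RGB").resize((128, 128))
#     upscaled = pipeline(image=low_res_img, num_inference_steps=100, eta=1.0).images[0]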
import numpy as np
from PIL import Image
def maxpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform maxpooling on the input 2D matrix (image).

    Args:
        arr: numpy array
        size: size of pooling matrix
        stride: the number of pixels shifts over the input matrix

    Returns:
        numpy array of maxpooled matrix

    Sample Input Output:
    >>> maxpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 6.,  8.],
           [14., 16.]])
    >>> maxpooling([[147, 180, 122], [241, 76, 32], [126, 13, 157]], 2, 1)
    array([[241., 180.],
           [241., 157.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    maxpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape maxpool_shape
    updated_arr = np.zeros((maxpool_shape, maxpool_shape))

    while i < arr.shape[0]:
        if i + size > arr.shape[0]:
            # if the end of the matrix is reached, break
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the maximum of the pooling matrix
            updated_arr[mat_i][mat_j] = np.max(arr[i : i + size, j : j + size])
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
def avgpooling(arr: np.ndarray, size: int, stride: int) -> np.ndarray:
    """
    Perform average pooling on the input 2D matrix (image).

    Args:
        arr: numpy array
        size: size of pooling matrix
        stride: the number of pixels shifts over the input matrix

    Returns:
        numpy array of average-pooled matrix

    Sample Input Output:
    >>> avgpooling([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12], [13, 14, 15, 16]], 2, 2)
    array([[ 3.,  5.],
           [11., 13.]])
    >>> avgpooling([[147, 180, 122], [241, 76, 32], [126, 13, 157]], 2, 1)
    array([[161., 102.],
           [114.,  69.]])
    """
    arr = np.array(arr)
    if arr.shape[0] != arr.shape[1]:
        raise ValueError("The input array is not a square matrix")
    i = 0
    j = 0
    mat_i = 0
    mat_j = 0

    # compute the shape of the output matrix
    avgpool_shape = (arr.shape[0] - size) // stride + 1
    # initialize the output matrix with zeros of shape avgpool_shape
    updated_arr = np.zeros((avgpool_shape, avgpool_shape))

    while i < arr.shape[0]:
        # if the end of the matrix is reached, break
        if i + size > arr.shape[0]:
            break
        while j < arr.shape[1]:
            # if the end of the matrix is reached, break
            if j + size > arr.shape[1]:
                break
            # compute the average of the pooling matrix
            updated_arr[mat_i][mat_j] = int(np.average(arr[i : i + size, j : j + size]))
            # shift the pooling matrix by stride of column pixels
            j += stride
            mat_j += 1

        # shift the pooling matrix by stride of row pixels
        i += stride
        mat_i += 1
        # reset the column index to 0
        j = 0
        mat_j = 0

    return updated_arr
# Main Function
if __name__ == "__main__":
from doctest import testmod
testmod(name='avgpooling', verbose=True)
# Loading the image
    image = Image.open("path_to_image")
# Converting the image to numpy array and maxpooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(maxpooling(np.array(image), size=3, stride=2)).show()
# Converting the image to numpy array and averagepooling, displaying the result
# Ensure that the image is a square matrix
Image.fromarray(avgpooling(np.array(image), size=3, stride=2)).show()
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_luke"] = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import OrderedDict
from typing import Any, List, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import logging
logger = logging.get_logger(__name__)

CODEGEN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/codegen-350M-nl""": """https://huggingface.co/Salesforce/codegen-350M-nl/resolve/main/config.json""",
"""Salesforce/codegen-350M-multi""": """https://huggingface.co/Salesforce/codegen-350M-multi/resolve/main/config.json""",
"""Salesforce/codegen-350M-mono""": """https://huggingface.co/Salesforce/codegen-350M-mono/resolve/main/config.json""",
"""Salesforce/codegen-2B-nl""": """https://huggingface.co/Salesforce/codegen-2B-nl/resolve/main/config.json""",
"""Salesforce/codegen-2B-multi""": """https://huggingface.co/Salesforce/codegen-2B-multi/resolve/main/config.json""",
"""Salesforce/codegen-2B-mono""": """https://huggingface.co/Salesforce/codegen-2B-mono/resolve/main/config.json""",
"""Salesforce/codegen-6B-nl""": """https://huggingface.co/Salesforce/codegen-6B-nl/resolve/main/config.json""",
"""Salesforce/codegen-6B-multi""": """https://huggingface.co/Salesforce/codegen-6B-multi/resolve/main/config.json""",
"""Salesforce/codegen-6B-mono""": """https://huggingface.co/Salesforce/codegen-6B-mono/resolve/main/config.json""",
"""Salesforce/codegen-16B-nl""": """https://huggingface.co/Salesforce/codegen-16B-nl/resolve/main/config.json""",
"""Salesforce/codegen-16B-multi""": """https://huggingface.co/Salesforce/codegen-16B-multi/resolve/main/config.json""",
"""Salesforce/codegen-16B-mono""": """https://huggingface.co/Salesforce/codegen-16B-mono/resolve/main/config.json""",
}
class CodeGenConfig(PretrainedConfig):
    r"""
    Configuration class to store the configuration of a CodeGen model.
    """

    model_type = "codegen"
    attribute_map = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
    def __init__(
        self,
        vocab_size=50400,
        n_positions=2048,
        n_ctx=2048,
        n_embd=4096,
        n_layer=28,
        n_head=16,
        rotary_dim=64,
        n_inner=None,
        activation_function="gelu_new",
        resid_pdrop=0.0,
        embd_pdrop=0.0,
        attn_pdrop=0.0,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        tie_word_embeddings=False,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.n_ctx = n_ctx
        self.n_positions = n_positions
        self.n_embd = n_embd
        self.n_layer = n_layer
        self.n_head = n_head
        self.n_inner = n_inner
        self.rotary_dim = rotary_dim
        self.activation_function = activation_function
        self.resid_pdrop = resid_pdrop
        self.embd_pdrop = embd_pdrop
        self.attn_pdrop = attn_pdrop
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache

        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        super().__init__(
            bos_token_id=bos_token_id, eos_token_id=eos_token_id, tie_word_embeddings=tie_word_embeddings, **kwargs
        )
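# Illustrative (a sketch; the override values are assumptions, not checked against the
# released checkpoints): smaller CodeGen variants override the width/depth defaults, e.g.
#
#     config = CodeGenConfig(n_embd=1024, n_layer=20, n_head=16)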
class CodeGenOnnxConfig(OnnxConfigWithPast):
    def __init__(
        self,
        config: PretrainedConfig,
        task: str = "default",
        patching_specs: List[PatchingSpec] = None,
        use_past: bool = False,
    ):
        super().__init__(config, task=task, patching_specs=patching_specs, use_past=use_past)
        if not getattr(self._config, "pad_token_id", None):
            # TODO: how to do that better?
            self._config.pad_token_id = 0
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs
@property
    def num_layers(self) -> int:
        return self._config.n_layer
@property
    def num_attention_heads(self) -> int:
        return self._config.n_head
    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(CodeGenOnnxConfig, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs
@property
    def default_onnx_opset(self) -> int:
        return 13
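# Export sketch (illustrative; the helper follows the transformers.onnx API of this era,
# and the model/tokenizer objects and output path are assumptions):
#
#     from pathlib import Path
#     from transformers.onnx import export
#
#     onnx_config = CodeGenOnnxConfig(model.config)
#     export(tokenizer, model, onnx_config, onnx_config.default_onnx_opset, Path("codegen.onnx"))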
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( __snake_case : int , __snake_case : int ):
'''simple docstring'''
return int((input_a, input_a).count(1 ) != 0 )
def _SCREAMING_SNAKE_CASE ( ):
'''simple docstring'''
assert or_gate(0 , 0 ) == 0
assert or_gate(0 , 1 ) == 1
assert or_gate(1 , 0 ) == 1
assert or_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
"""simple docstring"""
import copy
import json
import os
import tempfile
from transformers import is_torch_available
from .test_configuration_utils import config_common_kwargs
class ConfigTester(object):
    def __init__(self, parent, config_class=None, has_text_modality=True, common_properties=None, **kwargs):
        self.parent = parent
        self.config_class = config_class
        self.has_text_modality = has_text_modality
        self.inputs_dict = kwargs
        self.common_properties = common_properties
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        common_properties = (
            ["hidden_size", "num_attention_heads", "num_hidden_layers"]
            if self.common_properties is None
            else self.common_properties
        )

        # Add common fields for text models
        if self.has_text_modality:
            common_properties.extend(["vocab_size"])

        # Test that config has the common properties as getters
        for prop in common_properties:
            self.parent.assertTrue(hasattr(config, prop), msg=f"`{prop}` does not exist")

        # Test that config has the common properties as setter
        for idx, name in enumerate(common_properties):
            try:
                setattr(config, name, idx)
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass

        # Test if config class can be called with Config(prop_name=..)
        for idx, name in enumerate(common_properties):
            try:
                config = self.config_class(**{name: idx})
                self.parent.assertEqual(
                    getattr(config, name), idx, msg=f"`{name} value {idx} expected, but was {getattr(config, name)}"
                )
            except NotImplementedError:
                # Some models might not be able to implement setters for common_properties
                # In that case, a NotImplementedError is raised
                pass
    def create_and_test_config_to_json_string(self):
        config = self.config_class(**self.inputs_dict)
        obj = json.loads(config.to_json_string())
        for key, value in self.inputs_dict.items():
            self.parent.assertEqual(obj[key], value)
    def create_and_test_config_to_json_file(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "config.json")
            config_first.to_json_file(json_file_path)
            config_second = self.config_class.from_json_file(json_file_path)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained(self):
        config_first = self.config_class(**self.inputs_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            config_first.save_pretrained(tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_from_and_save_pretrained_subfolder(self):
        config_first = self.config_class(**self.inputs_dict)
        subfolder = "test"
        with tempfile.TemporaryDirectory() as tmpdirname:
            sub_tmpdirname = os.path.join(tmpdirname, subfolder)
            config_first.save_pretrained(sub_tmpdirname)
            config_second = self.config_class.from_pretrained(tmpdirname, subfolder=subfolder)

        self.parent.assertEqual(config_second.to_dict(), config_first.to_dict())
    def create_and_test_config_with_num_labels(self):
        config = self.config_class(**self.inputs_dict, num_labels=5)
        self.parent.assertEqual(len(config.id2label), 5)
        self.parent.assertEqual(len(config.label2id), 5)

        config.num_labels = 3
        self.parent.assertEqual(len(config.id2label), 3)
        self.parent.assertEqual(len(config.label2id), 3)
    def check_config_can_be_init_without_params(self):
        if self.config_class.is_composition:
            return
        config = self.config_class()
        self.parent.assertIsNotNone(config)
    def check_config_arguments_init(self):
        kwargs = copy.deepcopy(config_common_kwargs)
        config = self.config_class(**kwargs)
        wrong_values = []
        for key, value in config_common_kwargs.items():
            if key == "torch_dtype":
                if not is_torch_available():
                    continue
                else:
                    import torch

                    if config.torch_dtype != torch.float16:
                        wrong_values.append(("torch_dtype", config.torch_dtype, torch.float16))
            elif getattr(config, key) != value:
                wrong_values.append((key, getattr(config, key), value))

        if len(wrong_values) > 0:
            errors = "\n".join([f"- {v[0]}: got {v[1]} instead of {v[2]}" for v in wrong_values])
            raise ValueError(f"The following keys were not properly set in the config:\n{errors}")
    def run_common_tests(self):
self.create_and_test_config_common_properties()
self.create_and_test_config_to_json_string()
self.create_and_test_config_to_json_file()
self.create_and_test_config_from_and_save_pretrained()
self.create_and_test_config_from_and_save_pretrained_subfolder()
self.create_and_test_config_with_num_labels()
self.check_config_can_be_init_without_params()
self.check_config_arguments_init()
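# Usage sketch (illustrative; BertConfig stands in for any config class):
#
#     class BertConfigTest(unittest.TestCase):
#         def setUp(self):
#             self.config_tester = ConfigTester(self, config_class=BertConfig, hidden_size=37)
#
#         def test_config(self):
#             self.config_tester.run_common_tests()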
"""simple docstring"""
import socket
def main() -> None:
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    host = socket.gethostname()
    port = 12312

    sock.connect((host, port))
    sock.send(b"Hello server!")

    with open("Received_file", "wb") as out_file:
        print("File opened")
        print("Receiving data...")
        while True:
            data = sock.recv(1024)
            if not data:
                break
            out_file.write(data)

    print("Successfully received the file")
    sock.close()
    print("Connection closed")


if __name__ == "__main__":
    main()
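# Note: this client assumes a matching sender script is listening on port 12312 on the
# same host, accepting the greeting and then streaming a file back; that server side is
# assumed to exist alongside this script and is not shown here.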
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """
    Here we also overwrite some of the tests of test_modeling_common.py, as BEiT does not use input_ids,
    inputs_embeds, attention_mask and seq_length.
    """

    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
    @unittest.skip(reason="BEiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass
    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_training_gradient_checkpointing(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        if not self.model_tester.is_training:
            return

        config.use_cache = False
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if (
                model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.gradient_checkpointing_enable()
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                # we skip lambda parameters as these require special initial values
                # determined by config.layer_scale_init_value
                if "lambda" in name:
                    continue
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
    @slow
    def test_model_from_pretrained(self):
        for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = BeitModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
@require_torch
@require_vision
class BeitModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return BeitImageProcessor.from_pretrained("microsoft/beit-base-patch16-224") if is_vision_available() else None
    @slow
    def test_inference_masked_image_modeling_head(self):
        model = BeitForMaskedImageModeling.from_pretrained("microsoft/beit-base-patch16-224-pt22k").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        pixel_values = image_processor(images=image, return_tensors="pt").pixel_values.to(torch_device)

        # prepare bool_masked_pos
        bool_masked_pos = torch.ones((1, 196), dtype=torch.bool).to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 196, 8192))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor(
            [[-3.2437, 0.5072, -13.9174], [-3.2456, 0.4948, -13.9401], [-3.2033, 0.5121, -13.8550]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3], expected_slice, atol=1e-2))
    @slow
    def test_inference_image_classification_head_imagenet_1k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-base-patch16-224").to(torch_device)

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([-1.2385, -1.0987, -1.0108]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 281
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_image_classification_head_imagenet_22k(self):
        model = BeitForImageClassification.from_pretrained("microsoft/beit-large-patch16-224-pt22k-ft22k").to(
            torch_device
        )

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 21841))
        self.assertEqual(logits.shape, expected_shape)

        expected_slice = torch.tensor([1.6881, -0.2787, 0.5901]).to(torch_device)

        self.assertTrue(torch.allclose(logits[0, :3], expected_slice, atol=1e-4))

        expected_class_idx = 2396
        self.assertEqual(logits.argmax(-1).item(), expected_class_idx)
    @slow
    def test_inference_semantic_segmentation(self):
        model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
        model = model.to(torch_device)

        image_processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)

        ds = load_dataset("hf-internal-testing/fixtures_ade20k", split="test")
        image = Image.open(ds[0]["file"])
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        logits = outputs.logits

        # verify the logits
        expected_shape = torch.Size((1, 150, 160, 160))
        self.assertEqual(logits.shape, expected_shape)

        is_pillow_less_than_9 = version.parse(PIL.__version__) < version.parse("9.0.0")

        if is_pillow_less_than_9:
            expected_slice = torch.tensor(
                [
                    [[-4.9225, -2.3954, -3.0522], [-2.8822, -1.0046, -1.7561], [-2.9549, -1.3228, -2.1347]],
                    [[-5.8168, -3.4129, -4.0778], [-3.8651, -2.2214, -3.0277], [-3.8356, -2.4643, -3.3535]],
                    [[-0.0078, 3.9952, 4.0754], [2.9856, 4.6944, 5.0035], [3.2413, 4.7813, 4.9969]],
                ],
                device=torch_device,
            )
        else:
            expected_slice = torch.tensor(
                [
                    [[-4.8960, -2.3688, -3.0355], [-2.8478, -0.9836, -1.7418], [-2.9449, -1.3332, -2.1456]],
                    [[-5.8081, -3.4124, -4.1006], [-3.8561, -2.2081, -3.0323], [-3.8365, -2.4601, -3.3669]],
                    [[-0.0309, 3.9868, 4.0540], [2.9640, 4.6877, 4.9976], [3.2081, 4.7690, 4.9942]],
                ],
                device=torch_device,
            )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3], expected_slice, atol=1e-4))
@slow
def a_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_A = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640" )
_A = model.to(a__ )
_A = BeitImageProcessor(do_resize=a__ , size=6_40 , do_center_crop=a__ )
_A = load_dataset("hf-internal-testing/fixtures_ade20k" , split="test" )
_A = Image.open(ds[0]["file"] )
_A = image_processor(images=a__ , return_tensors="pt" ).to(a__ )
# forward pass
with torch.no_grad():
_A = model(**a__ )
_A = outputs.logits.detach().cpu()
_A = image_processor.post_process_semantic_segmentation(outputs=a__ , target_sizes=[(5_00, 3_00)] )
_A = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , a__ )
_A = image_processor.post_process_semantic_segmentation(outputs=a__ )
_A = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , a__ )
| 163 | 1 |
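A minimal inference sketch of the semantic-segmentation post-processing exercised above; the checkpoint and processor settings come from the tests themselves, while the image path is a placeholder.

import torch
from PIL import Image
from transformers import BeitForSemanticSegmentation, BeitImageProcessor

processor = BeitImageProcessor(do_resize=True, size=640, do_center_crop=False)
model = BeitForSemanticSegmentation.from_pretrained("microsoft/beit-base-finetuned-ade-640-640")
inputs = processor(images=Image.open("scene.png"), return_tensors="pt")  # "scene.png" is hypothetical
with torch.no_grad():
    outputs = model(**inputs)
# resize the 160x160 logits back to an arbitrary target size, as the last test does
maps = processor.post_process_semantic_segmentation(outputs, target_sizes=[(500, 300)])
print(maps[0].shape)  # torch.Size([500, 300])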
from collections.abc import Generator
from math import sin
def _UpperCAmelCase ( a__):
'''simple docstring'''
if len(a__) != 3_2:
raise ValueError("""Input must be of length 32""")
a_ : str = B""""""
for i in [3, 2, 1, 0]:
little_endian += string_aa[8 * i : 8 * i + 8]
return little_endian
def _UpperCAmelCase ( a__):
'''simple docstring'''
if i < 0:
raise ValueError("""Input must be non-negative""")
a_ : Tuple = format(a__ , """08x""")[-8:]
a_ : Optional[int] = B""""""
for i in [3, 2, 1, 0]:
little_endian_hex += hex_rep[2 * i : 2 * i + 2].encode("""utf-8""")
return little_endian_hex
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : Tuple = B""""""
for char in message:
bit_string += format(a__ , """08b""").encode("""utf-8""")
a_ : Dict = format(len(a__) , """064b""").encode("""utf-8""")
# Pad bit_string to a multiple of 512 chars
bit_string += b"1"
while len(a__) % 5_1_2 != 4_4_8:
bit_string += b"0"
bit_string += to_little_endian(start_len[3_2:]) + to_little_endian(start_len[:3_2])
return bit_string
def _UpperCAmelCase ( a__):
'''simple docstring'''
if len(a__) % 5_1_2 != 0:
raise ValueError("""Input must have length that's a multiple of 512""")
for pos in range(0 , len(a__) , 5_1_2):
a_ : Any = bit_string[pos : pos + 5_1_2]
a_ : str = []
for i in range(0 , 5_1_2 , 3_2):
block_words.append(int(to_little_endian(block[i : i + 3_2]) , 2))
yield block_words
def _UpperCAmelCase ( a__):
'''simple docstring'''
if i < 0:
raise ValueError("""Input must be non-negative""")
a_ : Optional[int] = format(a__ , """032b""")
a_ : List[str] = """"""
for c in i_str:
new_str += "1" if c == "0" else "0"
return int(a__ , 2)
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
return (a + b) % 2**3_2
def _UpperCAmelCase ( a__ , a__):
'''simple docstring'''
if i < 0:
raise ValueError("""Input must be non-negative""")
if shift < 0:
raise ValueError("""Shift must be non-negative""")
return ((i << shift) ^ (i >> (3_2 - shift))) % 2**3_2
def _UpperCAmelCase ( a__):
'''simple docstring'''
a_ : Any = preprocess(a__)
a_ : Union[str, Any] = [int(2**3_2 * abs(sin(i + 1))) for i in range(6_4)]
# Starting states
a_ : Union[str, Any] = 0x67452301
a_ : Union[str, Any] = 0xefcdab89
a_ : List[Any] = 0x98badcfe
a_ : List[Any] = 0x10325476
a_ : int = [
7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2, 7, 1_2, 1_7, 2_2,
5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0, 5, 9, 1_4, 2_0,
4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3, 4, 1_1, 1_6, 2_3,
6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1, 6, 1_0, 1_5, 2_1,
]
# Process bit string in chunks, each with 16 32-char words
for block_words in get_block_words(a__):
a_ : Union[str, Any] = aa
a_ : Union[str, Any] = ba
a_ : Tuple = ca
a_ : Union[str, Any] = da
# Hash current chunk
for i in range(6_4):
if i <= 1_5:
# f = (b & c) | (not_32(b) & d) # Alternate definition for f
a_ : List[str] = d ^ (b & (c ^ d))
a_ : List[Any] = i
elif i <= 3_1:
# f = (d & b) | (not_32(d) & c) # Alternate definition for f
a_ : List[Any] = c ^ (d & (b ^ c))
a_ : Optional[int] = (5 * i + 1) % 1_6
elif i <= 4_7:
a_ : Optional[Any] = b ^ c ^ d
a_ : Dict = (3 * i + 5) % 1_6
else:
a_ : Tuple = c ^ (b | not_aa(a__))
a_ : int = (7 * i) % 1_6
a_ : int = (f + a + added_consts[i] + block_words[g]) % 2**3_2
a_ : Optional[Any] = d
a_ : int = c
a_ : Union[str, Any] = b
a_ : List[Any] = sum_aa(a__ , left_rotate_aa(a__ , shift_amounts[i]))
# Add hashed chunk to running total
a_ : Optional[Any] = sum_aa(a__ , a__)
a_ : Union[str, Any] = sum_aa(a__ , a__)
a_ : Union[str, Any] = sum_aa(a__ , a__)
a_ : int = sum_aa(a__ , a__)
a_ : Any = reformat_hex(a__) + reformat_hex(a__) + reformat_hex(a__) + reformat_hex(a__)
return digest
if __name__ == "__main__":
import doctest
doctest.testmod()
| 248 |
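The block above is a from-scratch MD5 (message preprocessing, little-endian packing, and the 64-round compression loop). A quick way to validate such an implementation is to compare it against the standard library on the RFC 1321 test vectors:

import hashlib

# MD5("abc") and MD5("") from RFC 1321; a correct hand-rolled md5_me() must match these
assert hashlib.md5(b"abc").hexdigest() == "900150983cd24fb0d6963f7d28e17f72"
assert hashlib.md5(b"").hexdigest() == "d41d8cd98f00b204e9800998ecf8427e"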
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
_lowercase: Union[str, Any] = logging.get_logger(__name__)
@add_end_docstrings(lowerCAmelCase )
class _lowercase ( lowerCAmelCase ):
"""simple docstring"""
def __init__(self , *lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
super().__init__(*lowerCamelCase_ , **lowerCamelCase_ )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def UpperCamelCase_ (self , lowerCamelCase_=None , lowerCamelCase_=None , lowerCamelCase_=None ):
"""simple docstring"""
a = {}
a = {}
if prompt is not None:
a = prompt
if generate_kwargs is not None:
a = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
a = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
a = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__(self , lowerCamelCase_ , **lowerCamelCase_ ):
"""simple docstring"""
return super().__call__(lowerCamelCase_ , **lowerCamelCase_ )
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
a = load_image(lowerCamelCase_ )
if prompt is not None:
if not isinstance(lowerCamelCase_ , lowerCamelCase_ ):
raise ValueError(
F'''Received an invalid text input, got - {type(lowerCamelCase_ )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
a = self.model.config.model_type
if model_type == "git":
a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
a = self.tokenizer(text=lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ).input_ids
a = [self.tokenizer.cls_token_id] + input_ids
a = torch.tensor(lowerCamelCase_ ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
a = self.image_processor(images=lowerCamelCase_ , header_text=lowerCamelCase_ , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
a = self.tokenizer(lowerCamelCase_ , return_tensors=self.framework )
model_inputs.update(lowerCamelCase_ )
else:
raise ValueError(F'''Model type {model_type} does not support conditional text generation''' )
else:
a = self.image_processor(images=lowerCamelCase_ , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
a = None
return model_inputs
def UpperCamelCase_ (self , lowerCamelCase_ , lowerCamelCase_=None ):
"""simple docstring"""
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCamelCase_ )
and all(x is None for x in model_inputs["input_ids"] )
):
a = None
if generate_kwargs is None:
a = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
a = model_inputs.pop(self.model.main_input_name )
a = self.model.generate(lowerCamelCase_ , **lowerCamelCase_ , **lowerCamelCase_ )
return model_outputs
def UpperCamelCase_ (self , lowerCamelCase_ ):
"""simple docstring"""
a = []
for output_ids in model_outputs:
a = {
"generated_text": self.tokenizer.decode(
lowerCamelCase_ , skip_special_tokens=lowerCamelCase_ , )
}
records.append(lowerCamelCase_ )
return records
| 227 | 0 |
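A hedged usage sketch for the image-to-text pipeline defined above. The `prompt` argument is only honored by conditional models (the GIT and Pix2Struct branches of `preprocess`); the checkpoint name and image path are illustrative assumptions, not taken from this file.

from transformers import pipeline

captioner = pipeline("image-to-text", model="microsoft/git-base")  # assumed public checkpoint
print(captioner("photo.png", prompt="a photo of"))  # "photo.png" is hypothetical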
"""simple docstring"""
UpperCamelCase_ =[
"""DownloadConfig""",
"""DownloadManager""",
"""DownloadMode""",
"""StreamingDownloadManager""",
]
from .download_config import DownloadConfig
from .download_manager import DownloadManager, DownloadMode
from .streaming_download_manager import StreamingDownloadManager
| 128 |
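These re-exports are what user code typically reaches for when controlling download behavior; a minimal sketch (the dataset name is a placeholder):

from datasets import DownloadMode, load_dataset

# force a fresh download instead of reusing cached files
ds = load_dataset("some/dataset", download_mode=DownloadMode.FORCE_REDOWNLOAD)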
"""simple docstring"""
from __future__ import annotations
from random import choice
def a_ ( _lowercase ):
return choice(_lowercase )
def a_ ( _lowercase , _lowercase ):
_UpperCamelCase : Optional[int] = random_pivot(_lowercase )
# partition based on pivot
# linear time
_UpperCamelCase : Union[str, Any] = [e for e in lst if e < pivot]
_UpperCamelCase : int = [e for e in lst if e > pivot]
# if we get lucky, pivot might be the element we want.
# we can easily see this:
# small (elements smaller than k)
# + pivot (kth element)
# + big (elements larger than k)
if len(_lowercase ) == k - 1:
return pivot
# pivot is in elements bigger than k
elif len(_lowercase ) < k - 1:
return kth_number(_lowercase , k - len(_lowercase ) - 1 )
# pivot is in elements smaller than k
else:
return kth_number(_lowercase , _lowercase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 128 | 1 |
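A clean, runnable sketch of the quickselect routine above (de-obfuscated names; k is 1-based, and distinct elements are assumed because values equal to the pivot are dropped):

from random import choice

def kth_smallest(lst: list, k: int) -> int:
    pivot = choice(lst)
    small = [e for e in lst if e < pivot]  # strictly smaller than pivot
    big = [e for e in lst if e > pivot]    # strictly larger than pivot
    if len(small) == k - 1:
        return pivot  # exactly k-1 smaller values: pivot is the answer
    if len(small) < k - 1:
        return kth_smallest(big, k - len(small) - 1)  # answer lies among the larger values
    return kth_smallest(small, k)  # answer lies among the smaller values

assert kth_smallest([7, 10, 4, 3, 20, 15], 3) == 7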
'''simple docstring'''
from __future__ import annotations
import math
def _UpperCamelCase ( SCREAMING_SNAKE_CASE__ : int ):
'''simple docstring'''
if num <= 0:
UpperCAmelCase__ = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(SCREAMING_SNAKE_CASE__ )
UpperCAmelCase__ = [True] * (num + 1)
UpperCAmelCase__ = []
UpperCAmelCase__ = 2
UpperCAmelCase__ = int(math.sqrt(SCREAMING_SNAKE_CASE__ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE__ ):
if sieve[i] is True:
UpperCAmelCase__ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE__ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 346 |
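A quick cross-check for the sieve above against naive trial division (the de-obfuscated call would be `prime_sieve(25)`):

# primes up to 25 by trial division; a correct sieve must return the same list
naive = [p for p in range(2, 26) if all(p % d for d in range(2, p))]
assert naive == [2, 3, 5, 7, 11, 13, 17, 19, 23]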
'''simple docstring'''
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( lowerCamelCase_ , unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = MgpstrTokenizer
lowerCAmelCase_ : List[str] = False
lowerCAmelCase_ : Optional[int] = {}
lowerCAmelCase_ : Any = False
def SCREAMING_SNAKE_CASE__ ( self : List[Any] ):
"""simple docstring"""
super().setUp()
# fmt: off
UpperCAmelCase__ = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
UpperCAmelCase__ = dict(zip(_UpperCAmelCase , range(len(_UpperCAmelCase ) ) ) )
UpperCAmelCase__ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as fp:
fp.write(json.dumps(_UpperCAmelCase ) + """\n""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] , **_UpperCAmelCase : Optional[Any] ):
"""simple docstring"""
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_UpperCAmelCase )
def SCREAMING_SNAKE_CASE__ ( self : Optional[Any] , _UpperCAmelCase : Tuple ):
"""simple docstring"""
UpperCAmelCase__ = """tester"""
UpperCAmelCase__ = """tester"""
return input_text, output_text
@unittest.skip("""MGP-STR always lower cases letters.""" )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
pass
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers(do_lower_case=_UpperCAmelCase )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"""cls_token""": special_token} )
UpperCAmelCase__ = tokenizer.encode([special_token] , add_special_tokens=_UpperCAmelCase )
self.assertEqual(len(_UpperCAmelCase ) , 1 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase , skip_special_tokens=_UpperCAmelCase )
self.assertTrue(special_token not in decoded )
def SCREAMING_SNAKE_CASE__ ( self : List[str] ):
"""simple docstring"""
UpperCAmelCase__ = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
UpperCAmelCase__ , UpperCAmelCase__ = self.get_input_output_texts(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.tokenize(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_tokens_to_ids(_UpperCAmelCase )
UpperCAmelCase__ = tokenizer.encode(_UpperCAmelCase , add_special_tokens=_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase , _UpperCAmelCase )
UpperCAmelCase__ = tokenizer.convert_ids_to_tokens(_UpperCAmelCase )
self.assertNotEqual(len(_UpperCAmelCase ) , 0 )
UpperCAmelCase__ = tokenizer.decode(_UpperCAmelCase )
self.assertIsInstance(_UpperCAmelCase , _UpperCAmelCase )
self.assertEqual(text_a.replace(""" """ , """""" ) , _UpperCAmelCase )
@unittest.skip("""MGP-STR tokenizer only handles one sequence.""" )
def SCREAMING_SNAKE_CASE__ ( self : Any ):
"""simple docstring"""
pass
@unittest.skip("""inputs cannot be pretokenized in MgpstrTokenizer""" )
def SCREAMING_SNAKE_CASE__ ( self : str ):
"""simple docstring"""
pass
| 346 | 1 |
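A hedged usage sketch of the character-level tokenizer these tests exercise; the checkpoint name is believed to be the public MGP-STR release and should be treated as an assumption:

from transformers import MgpstrTokenizer

tok = MgpstrTokenizer.from_pretrained("alibaba-damo/mgp-str-base")  # assumed checkpoint
print(tok("tester")["input_ids"])  # one id per lower-cased character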
def _a ( SCREAMING_SNAKE_CASE__ : list ) -> list:
'''simple docstring'''
if len(SCREAMING_SNAKE_CASE__ ) < 2:
return collection
def circle_sort_util(SCREAMING_SNAKE_CASE__ : list , SCREAMING_SNAKE_CASE__ : int , SCREAMING_SNAKE_CASE__ : int ) -> bool:
SCREAMING_SNAKE_CASE__ : Any = False
if low == high:
return swapped
SCREAMING_SNAKE_CASE__ : List[str] = low
SCREAMING_SNAKE_CASE__ : Optional[int] = high
while left < right:
if collection[left] > collection[right]:
SCREAMING_SNAKE_CASE__ : List[str] = (
collection[right],
collection[left],
)
SCREAMING_SNAKE_CASE__ : int = True
left += 1
right -= 1
if left == right and collection[left] > collection[right + 1]:
SCREAMING_SNAKE_CASE__ : int = (
collection[right + 1],
collection[left],
)
SCREAMING_SNAKE_CASE__ : List[str] = True
SCREAMING_SNAKE_CASE__ : Any = low + int((high - low) / 2 )
SCREAMING_SNAKE_CASE__ : Union[str, Any] = circle_sort_util(SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ , SCREAMING_SNAKE_CASE__ )
SCREAMING_SNAKE_CASE__ : Any = circle_sort_util(SCREAMING_SNAKE_CASE__ , mid + 1 , SCREAMING_SNAKE_CASE__ )
return swapped or left_swap or right_swap
SCREAMING_SNAKE_CASE__ : int = True
while is_not_sorted is True:
SCREAMING_SNAKE_CASE__ : Optional[int] = circle_sort_util(SCREAMING_SNAKE_CASE__ , 0 , len(SCREAMING_SNAKE_CASE__ ) - 1 )
return collection
if __name__ == "__main__":
_lowerCamelCase : int = input('''Enter numbers separated by a comma:\n''').strip()
_lowerCamelCase : Optional[int] = [int(item) for item in user_input.split(''',''')]
print(circle_sort(unsorted))
| 353 |
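A de-obfuscated sketch of the circle sort above, kept faithful to its structure: swap mirrored pairs while closing inward, recurse on both halves, and repeat full passes until none of them swaps anything.

def circle_sort(collection: list) -> list:
    if len(collection) < 2:
        return collection

    def one_pass(lo: int, hi: int) -> bool:
        if lo == hi:
            return False
        swapped, left, right = False, lo, hi
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = collection[right], collection[left]
                swapped = True
            left += 1
            right -= 1
        # odd-length segment: compare the middle element with its right neighbor
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = collection[right + 1], collection[left]
            swapped = True
        mid = lo + (hi - lo) // 2
        # use | rather than `or` so both halves are always visited
        return one_pass(lo, mid) | one_pass(mid + 1, hi) | swapped

    while one_pass(0, len(collection) - 1):
        pass
    return collection

assert circle_sort([6, 1, 5, 2, 4, 3]) == [1, 2, 3, 4, 5, 6]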
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
_lowerCamelCase : int = logging.get_logger(__name__)
_lowerCamelCase : Optional[Any] = '''▁'''
_lowerCamelCase : Dict = {'''vocab_file''': '''sentencepiece.bpe.model'''}
_lowerCamelCase : int = {
'''vocab_file''': {
'''xlm-roberta-base''': '''https://huggingface.co/xlm-roberta-base/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large''': '''https://huggingface.co/xlm-roberta-large/resolve/main/sentencepiece.bpe.model''',
'''xlm-roberta-large-finetuned-conll02-dutch''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll02-spanish''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-english''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/sentencepiece.bpe.model'''
),
'''xlm-roberta-large-finetuned-conll03-german''': (
'''https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/sentencepiece.bpe.model'''
),
}
}
_lowerCamelCase : Optional[Any] = {
'''xlm-roberta-base''': 5_1_2,
'''xlm-roberta-large''': 5_1_2,
'''xlm-roberta-large-finetuned-conll02-dutch''': 5_1_2,
'''xlm-roberta-large-finetuned-conll02-spanish''': 5_1_2,
'''xlm-roberta-large-finetuned-conll03-english''': 5_1_2,
'''xlm-roberta-large-finetuned-conll03-german''': 5_1_2,
}
class lowerCamelCase (__lowerCamelCase ):
"""simple docstring"""
UpperCAmelCase_ = VOCAB_FILES_NAMES
UpperCAmelCase_ = PRETRAINED_VOCAB_FILES_MAP
UpperCAmelCase_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
UpperCAmelCase_ = ["input_ids", "attention_mask"]
def __init__( self : Dict, _UpperCAmelCase : str, _UpperCAmelCase : Optional[int]="<s>", _UpperCAmelCase : Optional[int]="</s>", _UpperCAmelCase : Dict="</s>", _UpperCAmelCase : List[Any]="<s>", _UpperCAmelCase : Union[str, Any]="<unk>", _UpperCAmelCase : List[Any]="<pad>", _UpperCAmelCase : str="<mask>", _UpperCAmelCase : Optional[Dict[str, Any]] = None, **_UpperCAmelCase : List[Any], ) -> None:
"""simple docstring"""
# Mask token behave like a normal word, i.e. include the space before it
SCREAMING_SNAKE_CASE__ : int = AddedToken(_UpperCAmelCase, lstrip=_UpperCAmelCase, rstrip=_UpperCAmelCase ) if isinstance(_UpperCAmelCase, _UpperCAmelCase ) else mask_token
SCREAMING_SNAKE_CASE__ : List[str] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_UpperCAmelCase, eos_token=_UpperCAmelCase, unk_token=_UpperCAmelCase, sep_token=_UpperCAmelCase, cls_token=_UpperCAmelCase, pad_token=_UpperCAmelCase, mask_token=_UpperCAmelCase, sp_model_kwargs=self.sp_model_kwargs, **_UpperCAmelCase, )
SCREAMING_SNAKE_CASE__ : str = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_UpperCAmelCase ) )
SCREAMING_SNAKE_CASE__ : Tuple = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# Mimic fairseq token-to-id alignment for the first 4 tokens
SCREAMING_SNAKE_CASE__ : List[str] = {"<s>": 0, "<pad>": 1, "</s>": 2, "<unk>": 3}
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
SCREAMING_SNAKE_CASE__ : Dict = 1
SCREAMING_SNAKE_CASE__ : int = len(self.sp_model ) + self.fairseq_offset
SCREAMING_SNAKE_CASE__ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : str ) -> Optional[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : List[Any] = self.__dict__.copy()
SCREAMING_SNAKE_CASE__ : List[Any] = None
SCREAMING_SNAKE_CASE__ : Dict = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : int, _UpperCAmelCase : List[Any] ) -> Any:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : Tuple = d
# for backward compatibility
if not hasattr(self, "sp_model_kwargs" ):
SCREAMING_SNAKE_CASE__ : Dict = {}
SCREAMING_SNAKE_CASE__ : Union[str, Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def A_ ( self : Any, _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : List[str] = [self.cls_token_id]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def A_ ( self : List[Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None, _UpperCAmelCase : bool = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_UpperCAmelCase, token_ids_a=_UpperCAmelCase, already_has_special_tokens=_UpperCAmelCase )
if token_ids_a is None:
return [1] + ([0] * len(_UpperCAmelCase )) + [1]
return [1] + ([0] * len(_UpperCAmelCase )) + [1, 1] + ([0] * len(_UpperCAmelCase )) + [1]
def A_ ( self : Union[str, Any], _UpperCAmelCase : List[int], _UpperCAmelCase : Optional[List[int]] = None ) -> List[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = [self.sep_token_id]
SCREAMING_SNAKE_CASE__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def A_ ( self : List[str] ) -> List[str]:
"""simple docstring"""
return len(self.sp_model ) + self.fairseq_offset + 1 # Add the <mask> token
def A_ ( self : List[Any] ) -> Optional[int]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : str = {self.convert_ids_to_tokens(_UpperCAmelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def A_ ( self : List[str], _UpperCAmelCase : str ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(_UpperCAmelCase, out_type=_UpperCAmelCase )
def A_ ( self : Optional[Any], _UpperCAmelCase : Union[str, Any] ) -> Union[str, Any]:
"""simple docstring"""
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
SCREAMING_SNAKE_CASE__ : Union[str, Any] = self.sp_model.PieceToId(_UpperCAmelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def A_ ( self : Tuple, _UpperCAmelCase : List[str] ) -> List[str]:
"""simple docstring"""
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def A_ ( self : Any, _UpperCAmelCase : int ) -> List[Any]:
"""simple docstring"""
SCREAMING_SNAKE_CASE__ : int = "".join(_UpperCAmelCase ).replace(_UpperCAmelCase, " " ).strip()
return out_string
def A_ ( self : Union[str, Any], _UpperCAmelCase : str, _UpperCAmelCase : Optional[str] = None ) -> Tuple[str]:
"""simple docstring"""
if not os.path.isdir(_UpperCAmelCase ):
logger.error(F'''Vocabulary path ({save_directory}) should be a directory''' )
return
SCREAMING_SNAKE_CASE__ : Optional[Any] = os.path.join(
_UpperCAmelCase, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_UpperCAmelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file, _UpperCAmelCase )
elif not os.path.isfile(self.vocab_file ):
with open(_UpperCAmelCase, "wb" ) as fi:
SCREAMING_SNAKE_CASE__ : Any = self.sp_model.serialized_model_proto()
fi.write(_UpperCAmelCase )
return (out_vocab_file,)
| 191 | 0 |
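A quick check of the fairseq/spm alignment table documented in the tokenizer above: the four specials keep their fairseq ids, and every sentencepiece piece is shifted by the offset of 1.

from transformers import XLMRobertaTokenizer

tok = XLMRobertaTokenizer.from_pretrained("xlm-roberta-base")
print(tok.convert_tokens_to_ids(["<s>", "<pad>", "</s>", "<unk>"]))  # [0, 1, 2, 3]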
'''simple docstring'''
import os
from pathlib import Path
import numpy as np
import pytest
from pack_dataset import pack_data_dir
from parameterized import parameterized
from save_len_file import save_len_file
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from transformers.models.mbart.modeling_mbart import shift_tokens_right
from transformers.testing_utils import TestCasePlus, slow
from utils import FAIRSEQ_AVAILABLE, DistributedSortishSampler, LegacySeqaSeqDataset, SeqaSeqDataset
_A : Tuple ='''bert-base-cased'''
_A : Tuple ='''google/pegasus-xsum'''
_A : Optional[int] =[''' Sam ate lunch today.''', '''Sams lunch ingredients.''']
_A : Tuple =['''A very interesting story about what I ate for lunch.''', '''Avocado, celery, turkey, coffee''']
_A : str ='''patrickvonplaten/t5-tiny-random'''
_A : Union[str, Any] ='''sshleifer/bart-tiny-random'''
_A : List[str] ='''sshleifer/tiny-mbart'''
_A : Any ='''sshleifer/tiny-marian-en-de'''
def SCREAMING_SNAKE_CASE_ (UpperCamelCase , UpperCamelCase ) -> Optional[Any]:
lowerCamelCase__ : Tuple = """\n""".join(UpperCamelCase )
Path(UpperCamelCase ).open("""w""" ).writelines(UpperCamelCase )
def SCREAMING_SNAKE_CASE_ (UpperCamelCase ) -> Any:
for split in ["train", "val", "test"]:
_dump_articles(os.path.join(UpperCamelCase , f'''{split}.source''' ) , UpperCamelCase )
_dump_articles(os.path.join(UpperCamelCase , f'''{split}.target''' ) , UpperCamelCase )
return tmp_dir
class _lowercase ( _lowercase ):
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
@slow
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: Dict ):
lowerCamelCase__ : int = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowerCamelCase__ : List[str] = 4
lowerCamelCase__ : str = 8
assert max_len_target > max_src_len # Will be truncated
assert max_len_source > max_src_len # Will be truncated
lowerCamelCase__ , lowerCamelCase__ : Optional[Any] = """ro_RO""", """de_DE""" # ignored for all but mbart, but never causes error.
lowerCamelCase__ : Any = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , src_lang=UpperCamelCase__ , tgt_lang=UpperCamelCase__ , )
lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert isinstance(UpperCamelCase__ , UpperCamelCase__ )
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_src_len
# show that targets are the same len
assert batch["labels"].shape[1] == max_tgt_len
if tok_name != MBART_TINY:
continue
# check language codes in correct place
lowerCamelCase__ : int = shift_tokens_right(batch["""labels"""] , tokenizer.pad_token_id )
assert batch["decoder_input_ids"][0, 0].item() == tokenizer.lang_code_to_id[tgt_lang]
assert batch["decoder_input_ids"][0, -1].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -2].item() == tokenizer.eos_token_id
assert batch["input_ids"][0, -1].item() == tokenizer.lang_code_to_id[src_lang]
break # No need to test every batch
@parameterized.expand([BART_TINY, BERT_BASE_CASED] )
def lowerCamelCase_ ( self: Any , UpperCamelCase__: Any ):
lowerCamelCase__ : Optional[int] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Dict = make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() )
lowerCamelCase__ : str = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in ARTICLES )
lowerCamelCase__ : Tuple = max(len(tokenizer.encode(UpperCamelCase__ ) ) for a in SUMMARIES )
lowerCamelCase__ : str = 4
lowerCamelCase__ : Tuple = LegacySeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=20 , max_target_length=UpperCamelCase__ , )
lowerCamelCase__ : Tuple = DataLoader(UpperCamelCase__ , batch_size=2 , collate_fn=train_dataset.collate_fn )
for batch in dataloader:
assert batch["attention_mask"].shape == batch["input_ids"].shape
# show that articles were trimmed.
assert batch["input_ids"].shape[1] == max_len_source
assert 20 >= batch["input_ids"].shape[1] # trimmed significantly
# show that targets were truncated
assert batch["labels"].shape[1] == trunc_target # Truncated
assert max_len_target > trunc_target # Truncated
break # No need to test every batch
def lowerCamelCase_ ( self: Any ):
lowerCamelCase__ : str = AutoTokenizer.from_pretrained("""facebook/mbart-large-cc25""" )
lowerCamelCase__ : Union[str, Any] = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
lowerCamelCase__ : List[str] = tmp_dir.joinpath("""train.source""" ).open().readlines()
lowerCamelCase__ : int = Path(make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) )
pack_data_dir(UpperCamelCase__ , UpperCamelCase__ , 128 , UpperCamelCase__ )
lowerCamelCase__ : Any = {x.name for x in tmp_dir.iterdir()}
lowerCamelCase__ : Union[str, Any] = {x.name for x in save_dir.iterdir()}
lowerCamelCase__ : Union[str, Any] = save_dir.joinpath("""train.source""" ).open().readlines()
# orig: [' Sam ate lunch today.\n', 'Sams lunch ingredients.']
# desired_packed: [' Sam ate lunch today.\n Sams lunch ingredients.']
assert len(UpperCamelCase__ ) < len(UpperCamelCase__ )
assert len(UpperCamelCase__ ) == 1
assert len(packed_examples[0] ) == sum(len(UpperCamelCase__ ) for x in orig_examples )
assert orig_paths == new_paths
@pytest.mark.skipif(not FAIRSEQ_AVAILABLE , reason="""This test requires fairseq""" )
def lowerCamelCase_ ( self: Dict ):
if not FAIRSEQ_AVAILABLE:
return
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Tuple = self._get_dataset(max_len=64 )
lowerCamelCase__ : List[str] = 64
lowerCamelCase__ : Union[str, Any] = ds.make_dynamic_sampler(UpperCamelCase__ , required_batch_size_multiple=UpperCamelCase__ )
lowerCamelCase__ : Optional[Any] = [len(UpperCamelCase__ ) for x in batch_sampler]
assert len(set(UpperCamelCase__ ) ) > 1 # it's not dynamic batch size if every batch is the same length
assert sum(UpperCamelCase__ ) == len(UpperCamelCase__ ) # no dropped or added examples
lowerCamelCase__ : Optional[int] = DataLoader(UpperCamelCase__ , batch_sampler=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase__ : Union[str, Any] = []
lowerCamelCase__ : Optional[int] = []
for batch in data_loader:
lowerCamelCase__ : Optional[Any] = batch["""input_ids"""].shape
lowerCamelCase__ : Tuple = src_shape[0]
assert bs % required_batch_size_multiple == 0 or bs < required_batch_size_multiple
lowerCamelCase__ : Union[str, Any] = np.product(batch["""input_ids"""].shape )
num_src_per_batch.append(UpperCamelCase__ )
if num_src_tokens > (max_tokens * 1.1):
failures.append(UpperCamelCase__ )
assert num_src_per_batch[0] == max(UpperCamelCase__ )
if failures:
raise AssertionError(F'''too many tokens in {len(UpperCamelCase__ )} batches''' )
def lowerCamelCase_ ( self: Optional[int] ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : Union[str, Any] = self._get_dataset(max_len=512 )
lowerCamelCase__ : Union[str, Any] = 2
lowerCamelCase__ : Optional[int] = ds.make_sortish_sampler(UpperCamelCase__ , shuffle=UpperCamelCase__ )
lowerCamelCase__ : Any = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 )
lowerCamelCase__ : Dict = DataLoader(UpperCamelCase__ , batch_size=UpperCamelCase__ , collate_fn=ds.collate_fn , num_workers=2 , sampler=UpperCamelCase__ )
lowerCamelCase__ : List[str] = tokenizer.pad_token_id
def count_pad_tokens(UpperCamelCase__: Tuple , UpperCamelCase__: Optional[int]="input_ids" ):
return [batch[k].eq(UpperCamelCase__ ).sum().item() for batch in data_loader]
assert sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) ) < sum(count_pad_tokens(UpperCamelCase__ , k="""labels""" ) )
assert sum(count_pad_tokens(UpperCamelCase__ ) ) < sum(count_pad_tokens(UpperCamelCase__ ) )
assert len(UpperCamelCase__ ) == len(UpperCamelCase__ )
def lowerCamelCase_ ( self: int , UpperCamelCase__: Tuple=1_000 , UpperCamelCase__: Dict=128 ):
if os.getenv("""USE_REAL_DATA""" , UpperCamelCase__ ):
lowerCamelCase__ : Tuple = """examples/seq2seq/wmt_en_ro"""
lowerCamelCase__ : List[str] = max_len * 2 * 64
if not Path(UpperCamelCase__ ).joinpath("""train.len""" ).exists():
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
else:
lowerCamelCase__ : Optional[int] = """examples/seq2seq/test_data/wmt_en_ro"""
lowerCamelCase__ : str = max_len * 4
save_len_file(UpperCamelCase__ , UpperCamelCase__ )
lowerCamelCase__ : Union[str, Any] = AutoTokenizer.from_pretrained(UpperCamelCase__ )
lowerCamelCase__ : Any = SeqaSeqDataset(
UpperCamelCase__ , data_dir=UpperCamelCase__ , type_path="""train""" , max_source_length=UpperCamelCase__ , max_target_length=UpperCamelCase__ , n_obs=UpperCamelCase__ , )
return ds, max_tokens, tokenizer
def lowerCamelCase_ ( self: str ):
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ : List[Any] = self._get_dataset()
lowerCamelCase__ : Union[str, Any] = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=0 , add_extra_examples=UpperCamelCase__ ) )
lowerCamelCase__ : Dict = set(DistributedSortishSampler(UpperCamelCase__ , 256 , num_replicas=2 , rank=1 , add_extra_examples=UpperCamelCase__ ) )
assert idsa.intersection(UpperCamelCase__ ) == set()
@parameterized.expand(
[
MBART_TINY,
MARIAN_TINY,
T5_TINY,
BART_TINY,
PEGASUS_XSUM,
] , )
def lowerCamelCase_ ( self: Optional[int] , UpperCamelCase__: int ):
lowerCamelCase__ : Tuple = AutoTokenizer.from_pretrained(UpperCamelCase__ , use_fast=UpperCamelCase__ )
if tok_name == MBART_TINY:
lowerCamelCase__ : Tuple = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , src_lang="""EN""" , tgt_lang="""FR""" , )
lowerCamelCase__ : Optional[Any] = train_dataset.dataset_kwargs
assert "src_lang" in kwargs and "tgt_lang" in kwargs
else:
lowerCamelCase__ : str = SeqaSeqDataset(
UpperCamelCase__ , data_dir=make_test_data_dir(tmp_dir=self.get_auto_remove_tmp_dir() ) , type_path="""train""" , max_source_length=4 , max_target_length=8 , )
lowerCamelCase__ : Optional[int] = train_dataset.dataset_kwargs
assert "add_prefix_space" not in kwargs if tok_name != BART_TINY else "add_prefix_space" in kwargs
assert len(UpperCamelCase__ ) == 1 if tok_name == BART_TINY else len(UpperCamelCase__ ) == 0
| 41 |
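The mBART batch checks above hinge on `shift_tokens_right` rotating the target-language code from the end of the labels to position 0 of the decoder inputs. A small demonstration with made-up ids:

import torch
from transformers.models.mbart.modeling_mbart import shift_tokens_right

labels = torch.tensor([[100, 101, 2, 250020]])  # [tokens..., eos, lang_code]; ids are illustrative
print(shift_tokens_right(labels, 1))  # second argument is pad_token_id
# tensor([[250020,    100,    101,      2]]) -- lang code first, everything else shifted right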
import requests
_A = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
def lowerCamelCase__ ( __lowerCAmelCase : str ):
"""simple docstring"""
lowerCAmelCase_ = requests.get(_NEWS_API + bbc_news_api_key ).json()
# each article in the list is a dict
for i, article in enumerate(bbc_news_page["articles"] , 1 ):
print(F"""{i}.) {article['title']}""" )
if __name__ == "__main__":
fetch_bbc_news(bbc_news_api_key="<Your BBC News API key goes here>")
| 231 | 0 |
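A slightly hardened variant of the fetch above, hitting the same endpoint; the key stays a placeholder, and the extra calls (`timeout`, `raise_for_status`) are standard `requests` usage:

import requests

NEWS_API = "https://newsapi.org/v1/articles?source=bbc-news&sortBy=top&apiKey="
resp = requests.get(NEWS_API + "<Your BBC News API key goes here>", timeout=10)
resp.raise_for_status()  # surface HTTP errors instead of failing later on .json()
for i, article in enumerate(resp.json()["articles"], 1):
    print(f"{i}.) {article['title']}")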
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import Audio, ClassLabel, Features
from .base import TaskTemplate
@dataclass(frozen=_a )
class a ( _a ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : str = field(default="audio-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"audio": Audio()} )
SCREAMING_SNAKE_CASE : ClassVar[Features] = Features({"labels": ClassLabel} )
SCREAMING_SNAKE_CASE : str = "audio"
SCREAMING_SNAKE_CASE : str = "labels"
def lowerCamelCase__ ( self : Dict , snake_case : Union[str, Any] ) -> str:
if self.label_column not in features:
raise ValueError(f'Column {self.label_column} is not present in features.' )
if not isinstance(features[self.label_column] , snake_case ):
raise ValueError(f'Column {self.label_column} is not a ClassLabel.' )
__UpperCAmelCase : int = copy.deepcopy(self )
__UpperCAmelCase : Tuple = self.label_schema.copy()
__UpperCAmelCase : Tuple = features[self.label_column]
__UpperCAmelCase : Tuple = label_schema
return task_template
@property
def lowerCamelCase__ ( self : Optional[int] ) -> Dict[str, str]:
return {
self.audio_column: "audio",
self.label_column: "labels",
}
| 240 |
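A hedged sketch of aligning this task template with a concrete schema (the label names are illustrative); `align_with_features` is the method the checks above implement:

from datasets import Audio, ClassLabel, Features
from datasets.tasks import AudioClassification

features = Features({"audio": Audio(), "labels": ClassLabel(names=["dog", "cat"])})
task = AudioClassification(audio_column="audio", label_column="labels")
task = task.align_with_features(features)  # copies the concrete ClassLabel into the template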
'''simple docstring'''
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class a :
"""simple docstring"""
def __init__( self : List[str] , snake_case : Any , snake_case : Tuple=13 , snake_case : Any=10 , snake_case : Any=3 , snake_case : Dict=2 , snake_case : Optional[Any]=2 , snake_case : Union[str, Any]=True , snake_case : Dict=True , snake_case : List[Any]=32 , snake_case : Dict=5 , snake_case : List[str]=4 , snake_case : Dict=37 , snake_case : Any="gelu" , snake_case : Optional[int]=0.1 , snake_case : Union[str, Any]=0.1 , snake_case : Optional[int]=10 , snake_case : Dict=0.02 , snake_case : Tuple="divided_space_time" , snake_case : List[Any]=None , ) -> Optional[int]:
__UpperCAmelCase : Dict = parent
__UpperCAmelCase : Tuple = batch_size
__UpperCAmelCase : Optional[Any] = image_size
__UpperCAmelCase : Optional[int] = num_channels
__UpperCAmelCase : Optional[Any] = patch_size
__UpperCAmelCase : List[str] = num_frames
__UpperCAmelCase : Union[str, Any] = is_training
__UpperCAmelCase : str = use_labels
__UpperCAmelCase : Union[str, Any] = hidden_size
__UpperCAmelCase : Any = num_hidden_layers
__UpperCAmelCase : List[Any] = num_attention_heads
__UpperCAmelCase : Dict = intermediate_size
__UpperCAmelCase : List[str] = hidden_act
__UpperCAmelCase : List[Any] = hidden_dropout_prob
__UpperCAmelCase : int = attention_probs_dropout_prob
__UpperCAmelCase : Any = attention_type
__UpperCAmelCase : Optional[Any] = initializer_range
__UpperCAmelCase : str = scope
__UpperCAmelCase : List[str] = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
__UpperCAmelCase : str = (image_size // patch_size) ** 2
__UpperCAmelCase : int = (num_frames) * self.num_patches_per_frame + 1
def lowerCamelCase__ ( self : List[Any] ) -> Tuple:
__UpperCAmelCase : List[str] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
__UpperCAmelCase : Dict = None
if self.use_labels:
__UpperCAmelCase : List[str] = ids_tensor([self.batch_size] , self.num_labels )
__UpperCAmelCase : Optional[int] = self.get_config()
return config, pixel_values, labels
def lowerCamelCase__ ( self : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : str = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
__UpperCAmelCase : Optional[int] = self.num_labels
return config
def lowerCamelCase__ ( self : Dict , snake_case : Any , snake_case : Optional[int] , snake_case : List[Any] ) -> Optional[Any]:
__UpperCAmelCase : List[Any] = TimesformerModel(config=snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : Tuple = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase__ ( self : int , snake_case : Tuple , snake_case : List[Any] , snake_case : Optional[Any] ) -> str:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification(snake_case )
model.to(snake_case )
model.eval()
__UpperCAmelCase : str = model(snake_case )
# verify the logits shape
__UpperCAmelCase : List[str] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , snake_case )
def lowerCamelCase__ ( self : Any ) -> List[str]:
__UpperCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase : int = config_and_inputs
__UpperCAmelCase : Optional[Any] = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a ( _a , _a , unittest.TestCase ):
"""simple docstring"""
SCREAMING_SNAKE_CASE : List[str] = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
SCREAMING_SNAKE_CASE : Optional[Any] = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
SCREAMING_SNAKE_CASE : int = False
SCREAMING_SNAKE_CASE : str = False
SCREAMING_SNAKE_CASE : Union[str, Any] = False
SCREAMING_SNAKE_CASE : List[Any] = False
def lowerCamelCase__ ( self : int ) -> str:
__UpperCAmelCase : Tuple = TimesformerModelTester(self )
__UpperCAmelCase : str = ConfigTester(
self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase__ ( self : Dict , snake_case : Optional[int] , snake_case : Optional[int] , snake_case : Optional[int]=False ) -> Union[str, Any]:
__UpperCAmelCase : Union[str, Any] = copy.deepcopy(snake_case )
if return_labels:
if model_class in get_values(snake_case ):
__UpperCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=snake_case )
return inputs_dict
def lowerCamelCase__ ( self : Optional[Any] ) -> Union[str, Any]:
self.config_tester.run_common_tests()
@unittest.skip(reason='''TimeSformer does not use inputs_embeds''' )
def lowerCamelCase__ ( self : Any ) -> Dict:
pass
def lowerCamelCase__ ( self : Optional[Any] ) -> int:
__UpperCAmelCase , __UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Optional[Any] = model_class(snake_case )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__UpperCAmelCase : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case , nn.Linear ) )
def lowerCamelCase__ ( self : List[Any] ) -> List[Any]:
__UpperCAmelCase , __UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : Dict = model_class(snake_case )
__UpperCAmelCase : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__UpperCAmelCase : int = [*signature.parameters.keys()]
__UpperCAmelCase : List[str] = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase__ ( self : Union[str, Any] ) -> Dict:
__UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase__ ( self : Tuple ) -> Dict:
__UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*snake_case )
@slow
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__UpperCAmelCase : Optional[int] = TimesformerModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def lowerCamelCase__ ( self : Dict ) -> List[Any]:
if not self.has_attentions:
pass
else:
__UpperCAmelCase , __UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__UpperCAmelCase : Optional[int] = True
for model_class in self.all_model_classes:
__UpperCAmelCase : Tuple = self.model_tester.seq_length
__UpperCAmelCase : int = self.model_tester.num_frames
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Any = False
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Tuple = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : int = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : str = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__UpperCAmelCase : Dict = True
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : List[Any] = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
__UpperCAmelCase : Tuple = len(snake_case )
# Check attention is always last and order is fine
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Optional[int] = True
__UpperCAmelCase : Union[str, Any] = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + 1 , len(snake_case ) )
__UpperCAmelCase : Any = outputs.attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
# attentions has shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def lowerCamelCase__ ( self : Dict ) -> Union[str, Any]:
def check_hidden_states_output(snake_case : Optional[Any] , snake_case : Optional[int] , snake_case : Tuple ):
__UpperCAmelCase : str = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
__UpperCAmelCase : Any = model(**self._prepare_for_class(snake_case , snake_case ) )
__UpperCAmelCase : int = outputs.hidden_states
__UpperCAmelCase : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(snake_case ) , snake_case )
__UpperCAmelCase : int = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
__UpperCAmelCase , __UpperCAmelCase : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__UpperCAmelCase : str = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def _a ( ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] = hf_hub_download(
repo_id='''hf-internal-testing/spaghetti-video''' , filename='''eating_spaghetti.npy''' , repo_type='''dataset''' )
__UpperCAmelCase : int = np.load(_lowercase )
return list(_lowercase )
@require_torch
@require_vision
class a ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def lowerCamelCase__ ( self : Union[str, Any] ) -> str:
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def lowerCamelCase__ ( self : str ) -> List[Any]:
__UpperCAmelCase : Union[str, Any] = TimesformerForVideoClassification.from_pretrained('''facebook/timesformer-base-finetuned-k400''' ).to(
snake_case )
__UpperCAmelCase : str = self.default_image_processor
__UpperCAmelCase : Dict = prepare_video()
__UpperCAmelCase : Union[str, Any] = image_processor(video[:8] , return_tensors='''pt''' ).to(snake_case )
# forward pass
with torch.no_grad():
__UpperCAmelCase : List[Any] = model(**snake_case )
# verify the logits
__UpperCAmelCase : Optional[Any] = torch.Size((1, 400) )
self.assertEqual(outputs.logits.shape , snake_case )
__UpperCAmelCase : List[Any] = torch.tensor([-0.3_016, -0.7_713, -0.4_205] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
| 240 | 1 |
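A single-clip inference sketch mirroring the integration test above; the checkpoint and normalization come from the test itself, while the random frames stand in for a real video:

import numpy as np
import torch
from transformers import TimesformerForVideoClassification, VideoMAEImageProcessor

processor = VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5])
model = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400")
video = list(np.random.randint(0, 255, (8, 224, 224, 3), dtype=np.uint8))  # 8 fake frames
inputs = processor(video, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.shape)  # torch.Size([1, 400]) -- the Kinetics-400 classification head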
"""simple docstring"""
import pytest
import datasets.config
from datasets.utils.info_utils import is_small_dataset
@pytest.mark.parametrize('dataset_size' , [None, 400 * 2**20, 600 * 2**20] )
@pytest.mark.parametrize('input_in_memory_max_size' , ['default', 0, 100 * 2**20, 900 * 2**20] )
def _snake_case ( snake_case__ : List[Any] , snake_case__ : str , snake_case__ : List[str] ):
if input_in_memory_max_size != "default":
monkeypatch.setattr(datasets.config , 'IN_MEMORY_MAX_SIZE' , snake_case__ )
A = datasets.config.IN_MEMORY_MAX_SIZE
if input_in_memory_max_size == "default":
assert in_memory_max_size == 0
else:
assert in_memory_max_size == input_in_memory_max_size
if dataset_size and in_memory_max_size:
A = dataset_size < in_memory_max_size
else:
A = False
A = is_small_dataset(snake_case__ )
assert result == expected
| 74 |
"""simple docstring"""
import operator
def _lowerCamelCase( a , a = False , a = None ):
__a = operator.lt if reverse else operator.gt
__a = solution or []
if not arr:
return solution
__a = [arr.pop(0 )]
for i, item in enumerate(a ):
if _operator(a , sublist[-1] ):
sublist.append(a )
arr.pop(a )
# merging sublist into solution list
if not solution:
solution.extend(a )
else:
while sublist:
__a = sublist.pop(0 )
for i, xx in enumerate(a ):
if not _operator(a , a ):
solution.insert(a , a )
break
else:
solution.append(a )
strand_sort(a , a , a )
return solution
if __name__ == "__main__":
assert strand_sort([4, 3, 5, 1, 2]) == [1, 2, 3, 4, 5]
assert strand_sort([4, 3, 5, 1, 2], reverse=True) == [5, 4, 3, 2, 1]
| 261 | 0 |
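A behavior note on the strand sort above, with a clean call (assuming the de-obfuscated original): the input list is consumed in place because strands are extracted with `pop`, so callers needing the original order must sort a copy.

data = [3, 1, 4, 2, 5]
print(strand_sort(list(data)))  # [1, 2, 3, 4, 5]; sorting a copy preserves `data`
print(strand_sort(data))        # [1, 2, 3, 4, 5]
print(data)                     # [] -- emptied as a side effect of strand extraction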
from typing import Dict
import numpy as np
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, GenericTensor, Pipeline, PipelineException
if is_tf_available():
import tensorflow as tf
from ..tf_utils import stable_softmax
if is_torch_available():
import torch
UpperCAmelCase : Any = logging.get_logger(__name__)
@add_end_docstrings(
UpperCamelCase__ , R"""
top_k (`int`, defaults to 5):
The number of predictions to return.
targets (`str` or `List[str]`, *optional*):
When passed, the model will limit the scores to the passed targets instead of looking up in the whole
vocab. If the provided targets are not in the model vocab, they will be tokenized and the first resulting
token will be used (with a warning, and that might be slower).
""" , )
class FillMaskPipeline(Pipeline):
    def get_masked_index(self, input_ids: GenericTensor) -> np.ndarray:
        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()
        elif self.framework == "pt":
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False)
        else:
            raise ValueError("Unsupported framework")
        return masked_index

    def _ensure_exactly_one_mask_token(self, input_ids: GenericTensor) -> np.ndarray:
        masked_index = self.get_masked_index(input_ids)
        numel = np.prod(masked_index.shape)
        if numel < 1:
            raise PipelineException(
                "fill-mask",
                self.model.base_model_prefix,
                f"No mask_token ({self.tokenizer.mask_token}) found on the input",
            )

    def ensure_exactly_one_mask_token(self, model_inputs: GenericTensor):
        if isinstance(model_inputs, list):
            for model_input in model_inputs:
                self._ensure_exactly_one_mask_token(model_input["input_ids"][0])
        else:
            for input_ids in model_inputs["input_ids"]:
                self._ensure_exactly_one_mask_token(input_ids)

    def preprocess(self, inputs, return_tensors=None, **preprocess_parameters) -> Dict[str, GenericTensor]:
        if return_tensors is None:
            return_tensors = self.framework
        model_inputs = self.tokenizer(inputs, return_tensors=return_tensors)
        self.ensure_exactly_one_mask_token(model_inputs)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        model_outputs["input_ids"] = model_inputs["input_ids"]
        return model_outputs

    def postprocess(self, model_outputs, top_k=5, target_ids=None):
        # Cap top_k if there are targets
        if target_ids is not None and target_ids.shape[0] < top_k:
            top_k = target_ids.shape[0]
        input_ids = model_outputs["input_ids"][0]
        outputs = model_outputs["logits"]

        if self.framework == "tf":
            masked_index = tf.where(input_ids == self.tokenizer.mask_token_id).numpy()[:, 0]

            outputs = outputs.numpy()

            logits = outputs[0, masked_index, :]
            probs = stable_softmax(logits, axis=-1)
            if target_ids is not None:
                probs = tf.gather_nd(tf.squeeze(probs, 0), target_ids.reshape(-1, 1))
                probs = tf.expand_dims(probs, 0)

            topk = tf.math.top_k(probs, k=top_k)
            values, predictions = topk.values.numpy(), topk.indices.numpy()
        else:
            masked_index = torch.nonzero(input_ids == self.tokenizer.mask_token_id, as_tuple=False).squeeze(-1)
            # Fill mask pipeline supports only one ${mask_token} per sample
            logits = outputs[0, masked_index, :]
            probs = logits.softmax(dim=-1)
            if target_ids is not None:
                probs = probs[..., target_ids]
            values, predictions = probs.topk(top_k)

        result = []
        single_mask = values.shape[0] == 1
        for i, (_values, _predictions) in enumerate(zip(values.tolist(), predictions.tolist())):
            row = []
            for v, p in zip(_values, _predictions):
                # Copy is important since we're going to modify this array in place
                tokens = input_ids.numpy().copy()
                if target_ids is not None:
                    p = target_ids[p].tolist()

                tokens[masked_index[i]] = p
                # Filter padding out:
                tokens = tokens[np.where(tokens != self.tokenizer.pad_token_id)]
                # Originally we skip special tokens to give readable output.
                # For multi masks though, the other [MASK] would be removed otherwise
                # making the output look odd, so we add them back
                sequence = self.tokenizer.decode(tokens, skip_special_tokens=single_mask)
                proposition = {"score": v, "token": p, "token_str": self.tokenizer.decode([p]), "sequence": sequence}
                row.append(proposition)
            result.append(row)
        if single_mask:
            return result[0]
        return result

    def get_target_ids(self, targets, top_k=None):
        if isinstance(targets, str):
            targets = [targets]
        try:
            vocab = self.tokenizer.get_vocab()
        except Exception:
            vocab = {}
        target_ids = []
        for target in targets:
            id_ = vocab.get(target, None)
            if id_ is None:
                input_ids = self.tokenizer(
                    target,
                    add_special_tokens=False,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    max_length=1,
                    truncation=True,
                )["input_ids"]
                if len(input_ids) == 0:
                    logger.warning(
                        f"The specified target token `{target}` does not exist in the model vocabulary. "
                        "We cannot replace it with anything meaningful, ignoring it"
                    )
                    continue
                id_ = input_ids[0]
                # XXX: If users encounter this pass
                # it becomes pretty slow, so let's make sure
                # The warning enables them to fix the input to
                # get faster performance.
                logger.warning(
                    f"The specified target token `{target}` does not exist in the model vocabulary. "
                    f"Replacing with `{self.tokenizer.convert_ids_to_tokens(id_)}`."
                )
            target_ids.append(id_)
        target_ids = list(set(target_ids))
        if len(target_ids) == 0:
            raise ValueError("At least one target must be provided when passed.")
        target_ids = np.array(target_ids)
        return target_ids

    def _sanitize_parameters(self, top_k=None, targets=None):
        postprocess_params = {}
        if targets is not None:
            target_ids = self.get_target_ids(targets, top_k)
            postprocess_params["target_ids"] = target_ids
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        if self.tokenizer.mask_token_id is None:
            raise PipelineException(
                "fill-mask", self.model.base_model_prefix, "The tokenizer does not define a `mask_token`."
            )
        return {}, {}, postprocess_params

    def __call__(self, inputs, *args, **kwargs):
        outputs = super().__call__(inputs, **kwargs)
        if isinstance(outputs, list) and len(outputs) == 1:
            return outputs[0]
        return outputs
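# --- Usage sketch (editor's addition, not part of the original row) ---
# The class above mirrors transformers' fill-mask pipeline; the supported entry
# point is the `pipeline` factory. Network access and the public
# `distilbert-base-uncased` checkpoint are assumed.
def demo_fill_mask() -> None:
    from transformers import pipeline

    unmasker = pipeline("fill-mask", model="distilbert-base-uncased")
    for candidate in unmasker("Paris is the [MASK] of France.", top_k=3):
        # each candidate carries "score", "token", "token_str" and "sequence",
        # exactly the dict assembled in postprocess() above
        print(candidate["token_str"], round(candidate["score"], 3))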
| 148 |
from __future__ import annotations
class Matrix:
    def __init__(self, rows: list[list[int]]) -> None:
        error = TypeError(
            "Matrices must be formed from a list of zero or more lists containing at "
            "least one and the same number of values, each of which must be of type "
            "int or float."
        )
        if len(rows) != 0:
            cols = len(rows[0])
            if cols == 0:
                raise error
            for row in rows:
                if len(row) != cols:
                    raise error
                for value in row:
                    if not isinstance(value, (int, float)):
                        raise error
            self.rows = rows
        else:
            self.rows = []

    def columns(self) -> list[list[int]]:
        return [[row[i] for row in self.rows] for i in range(len(self.rows[0]))]

    @property
    def num_rows(self) -> int:
        return len(self.rows)

    @property
    def num_columns(self) -> int:
        return len(self.rows[0])

    @property
    def order(self) -> tuple[int, int]:
        return (self.num_rows, self.num_columns)

    @property
    def is_square(self) -> bool:
        return self.order[0] == self.order[1]

    def identity(self) -> Matrix:
        values = [
            [0 if column_num != row_num else 1 for column_num in range(self.num_rows)]
            for row_num in range(self.num_rows)
        ]
        return Matrix(values)

    def determinant(self) -> int:
        if not self.is_square:
            return 0
        if self.order == (0, 0):
            return 1
        if self.order == (1, 1):
            return int(self.rows[0][0])
        if self.order == (2, 2):
            return int(
                (self.rows[0][0] * self.rows[1][1])
                - (self.rows[0][1] * self.rows[1][0])
            )
        else:
            # Laplace expansion along the first row.
            return sum(
                self.rows[0][column] * self.cofactors().rows[0][column]
                for column in range(self.num_columns)
            )

    def is_invertible(self) -> bool:
        return bool(self.determinant())

    def get_minor(self, row: int, column: int) -> int:
        values = [
            [
                self.rows[other_row][other_column]
                for other_column in range(self.num_columns)
                if other_column != column
            ]
            for other_row in range(self.num_rows)
            if other_row != row
        ]
        return Matrix(values).determinant()

    def get_cofactor(self, row: int, column: int) -> int:
        if (row + column) % 2 == 0:
            return self.get_minor(row, column)
        return -1 * self.get_minor(row, column)

    def minors(self) -> Matrix:
        return Matrix(
            [
                [self.get_minor(row, column) for column in range(self.num_columns)]
                for row in range(self.num_rows)
            ]
        )

    def cofactors(self) -> Matrix:
        return Matrix(
            [
                [
                    self.minors().rows[row][column]
                    if (row + column) % 2 == 0
                    else self.minors().rows[row][column] * -1
                    for column in range(self.minors().num_columns)
                ]
                for row in range(self.minors().num_rows)
            ]
        )

    def adjugate(self) -> Matrix:
        values = [
            [self.cofactors().rows[column][row] for column in range(self.num_columns)]
            for row in range(self.num_rows)
        ]
        return Matrix(values)

    def inverse(self) -> Matrix:
        determinant = self.determinant()
        if not determinant:
            raise TypeError("Only matrices with a non-zero determinant have an inverse")
        return self.adjugate() * (1 / determinant)

    def __repr__(self) -> str:
        return str(self.rows)

    def __str__(self) -> str:
        if self.num_rows == 0:
            return "[]"
        if self.num_rows == 1:
            return "[[" + ". ".join(str(self.rows[0])) + "]]"
        return (
            "["
            + "\n ".join(
                [
                    "[" + ". ".join([str(value) for value in row]) + ".]"
                    for row in self.rows
                ]
            )
            + "]"
        )

    def add_row(self, row: list[int], position: int | None = None) -> None:
        type_error = TypeError("Row must be a list containing all ints and/or floats")
        if not isinstance(row, list):
            raise type_error
        for value in row:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(row) != self.num_columns:
            raise ValueError(
                "Row must be equal in length to the other rows in the matrix"
            )
        if position is None:
            self.rows.append(row)
        else:
            self.rows = self.rows[0:position] + [row] + self.rows[position:]

    def add_column(self, column: list[int], position: int | None = None) -> None:
        type_error = TypeError(
            "Column must be a list containing all ints and/or floats"
        )
        if not isinstance(column, list):
            raise type_error
        for value in column:
            if not isinstance(value, (int, float)):
                raise type_error
        if len(column) != self.num_rows:
            raise ValueError(
                "Column must be equal in length to the other columns in the matrix"
            )
        if position is None:
            self.rows = [self.rows[i] + [column[i]] for i in range(self.num_rows)]
        else:
            self.rows = [
                self.rows[i][0:position] + [column[i]] + self.rows[i][position:]
                for i in range(self.num_rows)
            ]

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Matrix):
            return NotImplemented
        return self.rows == other.rows

    def __ne__(self, other: object) -> bool:
        return not self == other

    def __neg__(self) -> Matrix:
        return self * -1

    def __add__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Addition requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] + other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __sub__(self, other: Matrix) -> Matrix:
        if self.order != other.order:
            raise ValueError("Subtraction requires matrices of the same order")
        return Matrix(
            [
                [self.rows[i][j] - other.rows[i][j] for j in range(self.num_columns)]
                for i in range(self.num_rows)
            ]
        )

    def __mul__(self, other: Matrix | int | float) -> Matrix:
        if isinstance(other, (int, float)):
            return Matrix(
                [[int(element * other) for element in row] for row in self.rows]
            )
        elif isinstance(other, Matrix):
            if self.num_columns != other.num_rows:
                raise ValueError(
                    "The number of columns in the first matrix must "
                    "be equal to the number of rows in the second"
                )
            return Matrix(
                [
                    [Matrix.dot_product(row, column) for column in other.columns()]
                    for row in self.rows
                ]
            )
        else:
            raise TypeError(
                "A Matrix can only be multiplied by an int, float, or another matrix"
            )

    def __pow__(self, other: int) -> Matrix:
        if not isinstance(other, int):
            raise TypeError("A Matrix can only be raised to the power of an int")
        if not self.is_square:
            raise ValueError("Only square matrices can be raised to a power")
        if other == 0:
            return self.identity()
        if other < 0:
            if self.is_invertible():
                return self.inverse() ** (-other)
            raise ValueError(
                "Only invertible matrices can be raised to a negative power"
            )
        result = self
        for _ in range(other - 1):
            result *= self
        return result

    @classmethod
    def dot_product(cls, row: list[int], column: list[int]) -> int:
        return sum(row[i] * column[i] for i in range(len(row)))
if __name__ == "__main__":
import doctest
doctest.testmod()
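# --- Usage sketch (editor's addition) ---
# A 2x2 matrix with determinant 1, so its inverse has integer entries and the
# product with the inverse reproduces the identity exactly (the scalar
# __mul__ above truncates to int, so non-integral inverses would not survive).
def demo_matrix() -> None:
    m = Matrix([[2, 5], [1, 3]])
    print(m.determinant())                   # 1
    print(m.inverse())                       # values [[3, -5], [-1, 2]]
    print(m * m.inverse() == m.identity())   # True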
| 148 | 1 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTVaConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, "width_multiplier"))
class MobileViTVaModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=64,
        patch_size=2,
        num_channels=3,
        hidden_act="swish",
        conv_kernel_size=3,
        output_stride=32,
        classifier_dropout_prob=0.1,
        initializer_range=0.02,
        use_labels=True,
        is_training=True,
        num_labels=10,
        scope=None,
        width_multiplier=0.25,
        ffn_dropout=0.0,
        attn_dropout=0.0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.last_hidden_size = make_divisible(512 * width_multiplier, divisor=8)
        self.hidden_act = hidden_act
        self.conv_kernel_size = conv_kernel_size
        self.output_stride = output_stride
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
        self.width_multiplier = width_multiplier
        self.ffn_dropout = ffn_dropout
        self.attn_dropout = attn_dropout
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config(self):
return MobileViTVaConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_act=self.hidden_act, conv_kernel_size=self.conv_kernel_size, output_stride=self.output_stride, classifier_dropout_prob=self.classifier_dropout_prob, initializer_range=self.initializer_range, width_multiplier=self.width_multiplier, ffn_dropout=self.ffn_dropout, attn_dropout=self.attn_dropout, )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = MobileViTVaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (
                self.batch_size,
                self.last_hidden_size,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = MobileViTVaForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape,
            (
                self.batch_size,
                self.num_labels,
                self.image_size // self.output_stride,
                self.image_size // self.output_stride,
            ),
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class MobileViTVaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
A = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
A = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
A = False
A = False
A = False
A = False
    def setUp(self):
        self.model_tester = MobileViTVaModelTester(self)
        self.config_tester = MobileViTVaConfigTester(self, config_class=MobileViTVaConfig, has_text_modality=False)
def __snake_case (self ) -> List[str]:
self.config_tester.run_common_tests()
@unittest.skip(reason="""MobileViTV2 does not use inputs_embeds""" )
def __snake_case (self ) -> Tuple:
pass
@unittest.skip(reason="""MobileViTV2 does not support input and output embeddings""" )
def __snake_case (self ) -> List[Any]:
pass
@unittest.skip(reason="""MobileViTV2 does not output attentions""" )
def __snake_case (self ) -> List[Any]:
pass
@require_torch_multi_gpu
@unittest.skip(reason="""Got `CUDA error: misaligned address` for tests after this one being run.""" )
def __snake_case (self ) -> Tuple:
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def __snake_case (self ) -> Optional[int]:
pass
def __snake_case (self ) -> Any:
UpperCAmelCase_ , UpperCAmelCase_: str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Dict = model_class(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_: List[Any] = [*signature.parameters.keys()]
UpperCAmelCase_: Optional[Any] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1], SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
def check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ):
UpperCAmelCase_: Optional[int] = model_class(SCREAMING_SNAKE_CASE_ )
model.to(SCREAMING_SNAKE_CASE_ )
model.eval()
with torch.no_grad():
UpperCAmelCase_: List[Any] = model(**self._prepare_for_class(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ ) )
UpperCAmelCase_: List[str] = outputs.hidden_states
UpperCAmelCase_: Dict = 5
self.assertEqual(len(SCREAMING_SNAKE_CASE_ ), SCREAMING_SNAKE_CASE_ )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2.
UpperCAmelCase_: str = 2
for i in range(len(SCREAMING_SNAKE_CASE_ ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ), [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor], )
divisor *= 2
self.assertEqual(self.model_tester.output_stride, divisor // 2 )
UpperCAmelCase_ , UpperCAmelCase_: Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_: Dict = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_: Union[str, Any] = True
check_hidden_states_output(SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_, SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Optional[int]:
UpperCAmelCase_: Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*SCREAMING_SNAKE_CASE_ )
def __snake_case (self ) -> Tuple:
UpperCAmelCase_: Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*SCREAMING_SNAKE_CASE_ )
@slow
def __snake_case (self ) -> List[Any]:
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_: int = MobileViTVaModel.from_pretrained(SCREAMING_SNAKE_CASE_ )
self.assertIsNotNone(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase_ ():
"""simple docstring"""
UpperCAmelCase_: Dict = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class _a ( unittest.TestCase ):
@cached_property
def __snake_case (self ) -> str:
return (
MobileViTImageProcessor.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" )
if is_vision_available()
else None
)
@slow
def __snake_case (self ) -> Optional[Any]:
UpperCAmelCase_: Any = MobileViTVaForImageClassification.from_pretrained("""apple/mobilevitv2-1.0-imagenet1k-256""" ).to(
SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: List[str] = self.default_image_processor
UpperCAmelCase_: Optional[int] = prepare_img()
UpperCAmelCase_: Dict = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_: List[str] = model(**SCREAMING_SNAKE_CASE_ )
# verify the logits
UpperCAmelCase_: List[Any] = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Tuple = torch.tensor([-1.6336E00, -7.3204E-02, -5.1883E-01] ).to(SCREAMING_SNAKE_CASE_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
@slow
def __snake_case (self ) -> Dict:
UpperCAmelCase_: List[Any] = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_: Union[str, Any] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_: str = prepare_img()
UpperCAmelCase_: Optional[int] = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_: Union[str, Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = outputs.logits
# verify the logits
UpperCAmelCase_: List[str] = torch.Size((1, 21, 32, 32) )
self.assertEqual(logits.shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
], device=SCREAMING_SNAKE_CASE_, )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3], SCREAMING_SNAKE_CASE_, atol=1E-4 ) )
@slow
def __snake_case (self ) -> str:
UpperCAmelCase_: int = MobileViTVaForSemanticSegmentation.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_: Optional[int] = model.to(SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = MobileViTImageProcessor.from_pretrained("""shehan97/mobilevitv2-1.0-voc-deeplabv3""" )
UpperCAmelCase_: Optional[Any] = prepare_img()
UpperCAmelCase_: str = image_processor(images=SCREAMING_SNAKE_CASE_, return_tensors="""pt""" ).to(SCREAMING_SNAKE_CASE_ )
# forward pass
with torch.no_grad():
UpperCAmelCase_: Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Optional[Any] = outputs.logits.detach().cpu()
UpperCAmelCase_: int = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_, target_sizes=[(50, 60)] )
UpperCAmelCase_: Any = torch.Size((50, 60) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=SCREAMING_SNAKE_CASE_ )
UpperCAmelCase_: Dict = torch.Size((32, 32) )
self.assertEqual(segmentation[0].shape, SCREAMING_SNAKE_CASE_ )
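# --- Usage sketch (editor's addition) ---
# Plain inference with the same public checkpoint the integration tests above
# use. Network access is assumed; the COCO cats image is a placeholder input.
def demo_mobilevitv2() -> None:
    import requests
    import torch
    from PIL import Image
    from transformers import MobileViTImageProcessor, MobileViTV2ForImageClassification

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    image = Image.open(requests.get(url, stream=True).raw)
    processor = MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    model = MobileViTV2ForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256")
    with torch.no_grad():
        logits = model(**processor(images=image, return_tensors="pt")).logits
    print(model.config.id2label[int(logits.argmax(-1))])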
| 147 |
import argparse
import os
import transformers
from .convert_slow_tokenizer import SLOW_TO_FAST_CONVERTERS
from .utils import logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

TOKENIZER_CLASSES = {name: getattr(transformers, name + "Fast") for name in SLOW_TO_FAST_CONVERTERS}


def convert_slow_checkpoint_to_fast(tokenizer_name, checkpoint_name, dump_path, force_download):
    if tokenizer_name is not None and tokenizer_name not in TOKENIZER_CLASSES:
        raise ValueError(f"Unrecognized tokenizer name, should be one of {list(TOKENIZER_CLASSES.keys())}.")

    if tokenizer_name is None:
        tokenizer_names = TOKENIZER_CLASSES
    else:
        tokenizer_names = {tokenizer_name: getattr(transformers, tokenizer_name + "Fast")}

    logger.info(f"Loading tokenizer classes: {tokenizer_names}")

    for tokenizer_name in tokenizer_names:
        tokenizer_class = TOKENIZER_CLASSES[tokenizer_name]

        add_prefix = True
        if checkpoint_name is None:
            checkpoint_names = list(tokenizer_class.max_model_input_sizes.keys())
        else:
            checkpoint_names = [checkpoint_name]

        logger.info(f"For tokenizer {tokenizer_class.__class__.__name__} loading checkpoints: {checkpoint_names}")

        for checkpoint in checkpoint_names:
            logger.info(f"Loading {tokenizer_class.__class__.__name__} {checkpoint}")

            # Load tokenizer
            tokenizer = tokenizer_class.from_pretrained(checkpoint, force_download=force_download)

            # Save fast tokenizer
            logger.info(f"Save fast tokenizer to {dump_path} with prefix {checkpoint} add_prefix {add_prefix}")

            # For organization names we create sub-directories
            if "/" in checkpoint:
                checkpoint_directory, checkpoint_prefix_name = checkpoint.split("/")
                dump_path_full = os.path.join(dump_path, checkpoint_directory)
            elif add_prefix:
                checkpoint_prefix_name = checkpoint
                dump_path_full = dump_path
            else:
                checkpoint_prefix_name = None
                dump_path_full = dump_path

            logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            if checkpoint in list(tokenizer.pretrained_vocab_files_map.values())[0]:
                file_path = list(tokenizer.pretrained_vocab_files_map.values())[0][checkpoint]
                next_char = file_path.split(checkpoint)[-1][0]
                if next_char == "/":
                    dump_path_full = os.path.join(dump_path_full, checkpoint_prefix_name)
                    checkpoint_prefix_name = None

                logger.info(f"=> {dump_path_full} with prefix {checkpoint_prefix_name}, add_prefix {add_prefix}")

            file_names = tokenizer.save_pretrained(
                dump_path_full, legacy_format=False, filename_prefix=checkpoint_prefix_name
            )
            logger.info(f"=> File names {file_names}")

            for file_name in file_names:
                if not file_name.endswith("tokenizer.json"):
                    os.remove(file_name)
                    logger.info(f"=> removing {file_name}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--dump_path", default=None, type=str, required=True, help="Path to output generated fast tokenizer files."
)
parser.add_argument(
"--tokenizer_name",
default=None,
type=str,
help=(
F'''Optional tokenizer type selected in the list of {list(TOKENIZER_CLASSES.keys())}. If not given, will '''
"download and convert all the checkpoints from AWS."
),
)
parser.add_argument(
"--checkpoint_name",
default=None,
type=str,
help="Optional checkpoint name. If not given, will download and convert the canonical checkpoints from AWS.",
)
parser.add_argument(
"--force_download",
action="store_true",
help="Re-download checkpoints.",
)
    args = parser.parse_args()
convert_slow_checkpoint_to_fast(args.tokenizer_name, args.checkpoint_name, args.dump_path, args.force_download)
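# --- Usage sketch (editor's addition) ---
# Equivalent programmatic call for a single canonical checkpoint; "converted/"
# is a placeholder output directory and network access is assumed:
#
#     convert_slow_checkpoint_to_fast(
#         tokenizer_name="BertTokenizer",
#         checkpoint_name="bert-base-uncased",
#         dump_path="converted/",
#         force_download=False,
#     )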
| 169 | 0 |
"""simple docstring"""
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)


class WhisperFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features"]

    def __init__(
        self,
        feature_size=80,
        sampling_rate=16000,
        hop_length=160,
        chunk_length=30,
        n_fft=400,
        padding_value=0.0,
        return_attention_mask=False,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.chunk_length = chunk_length
        self.n_samples = chunk_length * sampling_rate
        self.nb_max_frames = self.n_samples // hop_length
        self.sampling_rate = sampling_rate
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=8000.0,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )

    def _np_extract_fbank_features(self, waveform: np.array) -> np.ndarray:
        # Log-mel spectrogram, clamped to an 8 dB dynamic range and rescaled.
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, "hann"),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters,
            log_mel="log10",
        )
        log_spec = log_spec[:, :-1]
        log_spec = np.maximum(log_spec, log_spec.max() - 8.0)
        log_spec = (log_spec + 4.0) / 4.0
        return log_spec

    @staticmethod
    # Copied from transformers.models.wav2vec2.feature_extraction_wav2vec2.Wav2Vec2FeatureExtractor.zero_mean_unit_var_norm
    def zero_mean_unit_var_norm(
        input_values: List[np.ndarray], attention_mask: List[np.ndarray], padding_value: float = 0.0
    ) -> List[np.ndarray]:
        if attention_mask is not None:
            attention_mask = np.array(attention_mask, np.int32)
            normed_input_values = []
            for vector, length in zip(input_values, attention_mask.sum(-1)):
                normed_slice = (vector - vector[:length].mean()) / np.sqrt(vector[:length].var() + 1e-7)
                if length < normed_slice.shape[0]:
                    normed_slice[length:] = padding_value
                normed_input_values.append(normed_slice)
        else:
            normed_input_values = [(x - x.mean()) / np.sqrt(x.var() + 1e-7) for x in input_values]
        return normed_input_values

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        truncation: bool = True,
        pad_to_multiple_of: Optional[int] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = None,
        padding: Optional[str] = "max_length",
        max_length: Optional[int] = None,
        sampling_rate: Optional[int] = None,
        do_normalize: Optional[bool] = None,
        **kwargs,
    ):
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    f"The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a"
                    f" sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input"
                    f" was sampled with {self.sampling_rate} and not {sampling_rate}."
                )
        else:
            logger.warning(
                "It is strongly recommended to pass the `sampling_rate` argument to this function. "
                "Failing to do so can result in silent errors that might be hard to debug."
            )

        is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
        if is_batched_numpy and len(raw_speech.shape) > 2:
            raise ValueError(f"Only mono-channel audio is supported for input to {self}")
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
        )

        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray):
            raw_speech = np.asarray(raw_speech, dtype=np.float32)
        elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
            raw_speech = raw_speech.astype(np.float32)

        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech]).T]

        batched_speech = BatchFeature({"input_features": raw_speech})

        # convert into correct format for padding
        padded_inputs = self.pad(
            batched_speech,
            padding=padding,
            max_length=max_length if max_length else self.n_samples,
            truncation=truncation,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask or do_normalize,
        )

        # zero-mean and unit-variance normalization
        if do_normalize:
            padded_inputs["input_features"] = self.zero_mean_unit_var_norm(
                padded_inputs["input_features"],
                attention_mask=padded_inputs["attention_mask"],
                padding_value=self.padding_value,
            )
            padded_inputs["input_features"] = np.stack(padded_inputs["input_features"], axis=0)

        # make sure list is in array format
        input_features = padded_inputs.get("input_features").transpose(2, 0, 1)

        input_features = [self._np_extract_fbank_features(waveform) for waveform in input_features[0]]

        if isinstance(input_features[0], List):
            padded_inputs["input_features"] = [np.asarray(feature, dtype=np.float32) for feature in input_features]
        else:
            padded_inputs["input_features"] = input_features

        if return_attention_mask:
            # rescale from sample (48000) to feature (3000)
            padded_inputs["attention_mask"] = padded_inputs["attention_mask"][:, :: self.hop_length]

        if return_tensors is not None:
            padded_inputs = padded_inputs.convert_to_tensors(return_tensors)

        return padded_inputs

    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        return output
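# --- Usage sketch (editor's addition) ---
# One second of silence in, log-mel features out. The extractor pads every
# input to 30 s, so the feature tensor is always (batch, 80 mels, 3000 frames).
def demo_feature_extractor() -> None:
    extractor = WhisperFeatureExtractor()
    speech = np.zeros(16000, dtype=np.float32)  # 1 s at 16 kHz
    features = extractor(speech, sampling_rate=16000, return_tensors="np")
    print(features["input_features"].shape)  # (1, 80, 3000)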
| 361 |
"""simple docstring"""
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
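# --- Usage note (editor's addition) ---
# This module backs the interactive `accelerate config` CLI command; running it
# answers a series of prompts and writes default_config.yaml (or a JSON file
# when the chosen path ends in ".json"). The canonical invocation is simply:
#
#     accelerate config --config_file my_config.yaml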
| 144 | 0 |
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = "http://www.mocksite.com/file1.txt"
CONTENT = '"text": ["foo", "foo"]'
HASH = "6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8"


class MockResponse:
    status_code = 200
    headers = {"Content-Length": "100"}
    cookies = {}

    def iter_content(self, **kwargs):
        return [bytes(CONTENT, "utf-8")]


def mock_request(*args, **kwargs):
    return MockResponse()
@pytest.mark.parametrize("urls_type", [str, list, dict])
def test_download_manager_download(urls_type, tmp_path, monkeypatch):
    import requests

    monkeypatch.setattr(requests, "request", mock_request)

    url = URL
    if issubclass(urls_type, str):
        urls = url
    elif issubclass(urls_type, list):
        urls = [url]
    elif issubclass(urls_type, dict):
        urls = {"train": url}
    dataset_name = "dummy"
    cache_subdir = "downloads"
    cache_dir_root = tmp_path
    download_config = DownloadConfig(
        cache_dir=os.path.join(cache_dir_root, cache_subdir),
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    downloaded_paths = dl_manager.download(urls)
    input_urls = urls
    for downloaded_paths in [downloaded_paths]:
        if isinstance(urls, str):
            downloaded_paths = [downloaded_paths]
            input_urls = [urls]
        elif isinstance(urls, dict):
            assert "train" in downloaded_paths.keys()
            downloaded_paths = downloaded_paths.values()
            input_urls = urls.values()
        assert downloaded_paths
        for downloaded_path, input_url in zip(downloaded_paths, input_urls):
            assert downloaded_path == dl_manager.downloaded_paths[input_url]
            downloaded_path = Path(downloaded_path)
            parts = downloaded_path.parts
            assert parts[-1] == HASH
            assert parts[-2] == cache_subdir
            assert downloaded_path.exists()
            content = downloaded_path.read_text()
            assert content == CONTENT
            metadata_downloaded_path = downloaded_path.with_suffix(".json")
            assert metadata_downloaded_path.exists()
            metadata_content = json.loads(metadata_downloaded_path.read_text())
            assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("paths_type", [str, list, dict])
def test_download_manager_extract(paths_type, xz_file, text_file):
    filename = str(xz_file)
    if issubclass(paths_type, str):
        paths = filename
    elif issubclass(paths_type, list):
        paths = [filename]
    elif issubclass(paths_type, dict):
        paths = {"train": filename}
    dataset_name = "dummy"
    cache_dir = xz_file.parent
    extracted_subdir = "extracted"
    download_config = DownloadConfig(
        cache_dir=cache_dir,
        use_etag=False,
    )
    dl_manager = DownloadManager(dataset_name=dataset_name, download_config=download_config)
    extracted_paths = dl_manager.extract(paths)
    input_paths = paths
    for extracted_paths in [extracted_paths]:
        if isinstance(paths, str):
            extracted_paths = [extracted_paths]
            input_paths = [paths]
        elif isinstance(paths, dict):
            assert "train" in extracted_paths.keys()
            extracted_paths = extracted_paths.values()
            input_paths = paths.values()
        assert extracted_paths
        for extracted_path, input_path in zip(extracted_paths, input_paths):
            assert extracted_path == dl_manager.extracted_paths[input_path]
            extracted_path = Path(extracted_path)
            parts = extracted_path.parts
            assert parts[-1] == hash_url_to_filename(input_path, etag=None)
            assert parts[-2] == extracted_subdir
            assert extracted_path.exists()
            extracted_file_content = extracted_path.read_text()
            expected_file_content = text_file.read_text()
            assert extracted_file_content == expected_file_content
def _test_jsonl(path, file):
    assert path.endswith(".jsonl")
    for num_items, line in enumerate(file, start=1):
        item = json.loads(line.decode("utf-8"))
        assert item.keys() == {"col_1", "col_2", "col_3"}
    assert num_items == 4
@pytest.mark.parametrize("archive_jsonl", ["tar_jsonl_path", "zip_jsonl_path"])
def test_iter_archive_path(archive_jsonl, request):
    archive_jsonl_path = request.getfixturevalue(archive_jsonl)
    dl_manager = DownloadManager()
    for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(archive_jsonl_path), start=1):
        _test_jsonl(path, file)
    assert num_jsonl == 2
@pytest.mark.parametrize("archive_nested_jsonl", ["tar_nested_jsonl_path", "zip_nested_jsonl_path"])
def test_iter_archive_file(archive_nested_jsonl, request):
    archive_nested_jsonl_path = request.getfixturevalue(archive_nested_jsonl)
    dl_manager = DownloadManager()
    for num_tar, (path, file) in enumerate(dl_manager.iter_archive(archive_nested_jsonl_path), start=1):
        for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(file), start=1):
            _test_jsonl(subpath, subfile)
    assert num_tar == 1
    assert num_jsonl == 2
def test_iter_files(data_dir_with_hidden_files):
    dl_manager = DownloadManager()
    for num_file, file in enumerate(dl_manager.iter_files(data_dir_with_hidden_files), start=1):
        assert os.path.basename(file) == ("test.txt" if num_file == 1 else "train.txt")
    assert num_file == 2
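# --- Usage sketch (editor's addition) ---
# The public API the tests above exercise; the URL is a placeholder and
# network access is assumed.
def demo_download_manager() -> None:
    from datasets import DownloadConfig, DownloadManager

    dl_manager = DownloadManager(download_config=DownloadConfig(cache_dir="./downloads"))
    local_path = dl_manager.download("https://example.com/file1.txt")  # placeholder URL
    print(local_path)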
| 43 | import numpy as np
def power_iteration(input_matrix, vector, error_tol=1e-12, max_iterations=100):
    # Ensure the matrix is square.
    assert np.shape(input_matrix)[0] == np.shape(input_matrix)[1]
    # Ensure proper dimensionality.
    assert np.shape(input_matrix)[0] == np.shape(vector)[0]
    # Ensure inputs are either both complex or both real
    assert np.iscomplexobj(input_matrix) == np.iscomplexobj(vector)
    is_complex = np.iscomplexobj(input_matrix)
    if is_complex:
        # Ensure complex input_matrix is Hermitian
        assert np.array_equal(input_matrix, input_matrix.conj().T)

    # Set convergence to False. Will define convergence when we exceed max_iterations
    # or when we have small changes from one iteration to next.
    convergence = False
    lambda_previous = 0
    iterations = 0
    error = 1e12

    while not convergence:
        # Multiple matrix by the vector.
        w = np.dot(input_matrix, vector)
        # Normalize the resulting output vector.
        vector = w / np.linalg.norm(w)
        # Find rayleigh quotient
        # (faster than usual b/c we know vector is normalized already)
        vector_h = vector.conj().T if is_complex else vector.T
        lambda_ = np.dot(vector_h, np.dot(input_matrix, vector))

        # Check convergence.
        error = np.abs(lambda_ - lambda_previous) / lambda_
        iterations += 1

        if error <= error_tol or iterations >= max_iterations:
            convergence = True

        lambda_previous = lambda_

    if is_complex:
        lambda_ = np.real(lambda_)

    return lambda_, vector


def test_power_iteration():
    real_input_matrix = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]])
    real_vector = np.array([41, 4, 20])
    complex_input_matrix = real_input_matrix.astype(np.complex128)
    imag_matrix = np.triu(1j * complex_input_matrix, 1)
    complex_input_matrix += imag_matrix
    complex_input_matrix += -1 * imag_matrix.T
    complex_vector = np.array([41, 4, 20]).astype(np.complex128)

    for problem_type in ["real", "complex"]:
        if problem_type == "real":
            input_matrix = real_input_matrix
            vector = real_vector
        elif problem_type == "complex":
            input_matrix = complex_input_matrix
            vector = complex_vector

        # Our implementation.
        eigen_value, eigen_vector = power_iteration(input_matrix, vector)

        # Numpy implementation.
        # Get eigenvalues and eigenvectors using built-in numpy
        # eigh (eigh used for symmetric or hermetian matrices).
        eigen_values, eigen_vectors = np.linalg.eigh(input_matrix)
        # Last eigenvalue is the maximum one.
        eigen_value_max = eigen_values[-1]
        # Last column in this matrix is eigenvector corresponding to largest eigenvalue.
        eigen_vector_max = eigen_vectors[:, -1]

        # Check our implementation and numpy gives close answers.
        assert np.abs(eigen_value - eigen_value_max) <= 1e-6
        # Take absolute values element wise of each eigenvector.
        # as they are only unique to a minus sign.
        assert np.linalg.norm(np.abs(eigen_vector) - np.abs(eigen_vector_max)) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration()
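# --- Usage sketch (editor's addition) ---
# For a diagonal matrix the dominant eigenvalue is just the largest diagonal
# entry, which makes the result easy to eyeball.
def demo_power_iteration() -> None:
    matrix = np.array([[2.0, 0.0], [0.0, 1.0]])
    eigen_value, eigen_vector = power_iteration(matrix, np.array([1.0, 1.0]))
    print(eigen_value)   # ~2.0
    print(eigen_vector)  # ~[1, 0] up to sign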
| 43 | 1 |
class SubArray:
    def __init__(self, arr):
        # we need a list not a string, so do something to change the type
        self.array = arr.split(",")

    def solve_sub_array(self):
        rear = [int(self.array[0])] * len(self.array)
        sum_value = [int(self.array[0])] * len(self.array)
        for i in range(1, len(self.array)):
            sum_value[i] = max(int(self.array[i]) + sum_value[i - 1], int(self.array[i]))
            rear[i] = max(sum_value[i], rear[i - 1])
        return rear[len(self.array) - 1]


if __name__ == "__main__":
    whole_array = input("please input some numbers:")
    array = SubArray(whole_array)
    re = array.solve_sub_array()
    print(("the results is:", re)) | 359 |
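# --- Worked example (editor's addition) ---
# For "1,-3,4,-2,5" the best contiguous slice is [4, -2, 5], whose sum is 7.
def demo_sub_array() -> None:
    print(SubArray("1,-3,4,-2,5").solve_sub_array())  # 7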
import os
import tempfile
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from torch import nn
from transformers import (
Adafactor,
AdamW,
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_inverse_sqrt_schedule,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
def unwrap_schedule(scheduler, num_steps=10):
    lrs = []
    for _ in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
    return lrs


def unwrap_and_save_reload_schedule(scheduler, num_steps=10):
    lrs = []
    for step in range(num_steps):
        lrs.append(scheduler.get_lr()[0])
        scheduler.step()
        if step == num_steps // 2:
            with tempfile.TemporaryDirectory() as tmpdirname:
                file_name = os.path.join(tmpdirname, "schedule.bin")
                torch.save(scheduler.state_dict(), file_name)

                state_dict = torch.load(file_name)
                scheduler.load_state_dict(state_dict)
    return lrs
@require_torch
class OptimizationTest(unittest.TestCase):
    def assertListAlmostEqual(self, list1, list2, tol):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol)

    def test_adam_w(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = AdamW(params=[w], lr=2e-1, weight_decay=0.0)
        for _ in range(100):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)

    def test_adafactor(self):
        w = torch.tensor([0.1, -0.2, -0.1], requires_grad=True)
        target = torch.tensor([0.4, 0.2, -0.5])
        criterion = nn.MSELoss()
        # No warmup, constant schedule, no gradient clipping
        optimizer = Adafactor(
            params=[w],
            lr=1e-2,
            eps=(1e-30, 1e-3),
            clip_threshold=1.0,
            decay_rate=-0.8,
            beta1=None,
            weight_decay=0.0,
            relative_step=False,
            scale_parameter=False,
            warmup_init=False,
        )
        for _ in range(1000):
            loss = criterion(w, target)
            loss.backward()
            optimizer.step()
            w.grad.detach_()  # No zero_grad() function on simple tensors. we do it ourselves.
            w.grad.zero_()
        self.assertListAlmostEqual(w.tolist(), [0.4, 0.2, -0.5], tol=1e-2)
@require_torch
class ScheduleInitTest(unittest.TestCase):
    m = nn.Linear(50, 50) if is_torch_available() else None
    optimizer = AdamW(m.parameters(), lr=10.0) if is_torch_available() else None
    num_steps = 10
    def assertListAlmostEqual(self, list1, list2, tol, msg=None):
        self.assertEqual(len(list1), len(list2))
        for a, b in zip(list1, list2):
            self.assertAlmostEqual(a, b, delta=tol, msg=msg)

    def test_schedulers(self):
        common_kwargs = {"num_warmup_steps": 2, "num_training_steps": 10}
# schedulers doct format
# function: (sched_args_dict, expected_learning_rates)
        scheds = {
get_constant_schedule: ({}, [10.0] * self.num_steps),
get_constant_schedule_with_warmup: (
{"num_warmup_steps": 4},
[0.0, 2.5, 5.0, 7.5, 10.0, 10.0, 10.0, 10.0, 10.0, 10.0],
),
get_linear_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25],
),
get_cosine_schedule_with_warmup: (
{**common_kwargs},
[0.0, 5.0, 10.0, 9.61, 8.53, 6.91, 5.0, 3.08, 1.46, 0.38],
),
get_cosine_with_hard_restarts_schedule_with_warmup: (
{**common_kwargs, "num_cycles": 2},
[0.0, 5.0, 10.0, 8.53, 5.0, 1.46, 10.0, 8.53, 5.0, 1.46],
),
get_polynomial_decay_schedule_with_warmup: (
{**common_kwargs, "power": 2.0, "lr_end": 1E-7},
[0.0, 5.0, 10.0, 7.656, 5.625, 3.906, 2.5, 1.406, 0.625, 0.156],
),
get_inverse_sqrt_schedule: (
{"num_warmup_steps": 2},
[0.0, 5.0, 10.0, 8.165, 7.071, 6.325, 5.774, 5.345, 5.0, 4.714],
),
}
for scheduler_func, data in scheds.items():
            kwargs, expected_learning_rates = data

            scheduler = scheduler_func(self.optimizer, **kwargs)
            self.assertEqual(len([scheduler.get_lr()[0]]), 1)
            lrs_1 = unwrap_schedule(scheduler, self.num_steps)
            self.assertListAlmostEqual(
                lrs_1, expected_learning_rates, tol=1e-2, msg=f"failed for {scheduler_func} in normal scheduler"
            )

            scheduler = scheduler_func(self.optimizer, **kwargs)
            if scheduler_func.__name__ != "get_constant_schedule":
                LambdaScheduleWrapper.wrap_scheduler(scheduler)  # wrap to test picklability of the schedule
            lrs_2 = unwrap_and_save_reload_schedule(scheduler, self.num_steps)
            self.assertListEqual(lrs_1, lrs_2, msg=f"failed for {scheduler_func} in save and reload")
class LambdaScheduleWrapper:
    def __init__(self, fn):
        self.fn = fn

    def __call__(self, *args, **kwargs):
        return self.fn(*args, **kwargs)

    @classmethod
    def wrap_scheduler(cls, scheduler):
        scheduler.lr_lambdas = list(map(cls, scheduler.lr_lambdas)) | 329 | 0 |
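# --- Usage sketch (editor's addition) ---
# The warmup-then-linear-decay schedule checked above, driven directly: the
# learning rate ramps 0 -> 10 over 2 warmup steps, then decays linearly.
def demo_linear_schedule() -> None:
    model = nn.Linear(50, 50)
    optimizer = AdamW(model.parameters(), lr=10.0)
    scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=2, num_training_steps=10)
    lrs = []
    for _ in range(10):
        lrs.append(scheduler.get_last_lr()[0])
        optimizer.step()
        scheduler.step()
    print(lrs)  # [0.0, 5.0, 10.0, 8.75, 7.5, 6.25, 5.0, 3.75, 2.5, 1.25]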
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "decord")
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING)

    def _sanitize_parameters(self, top_k=None, num_frames=None, frame_sampling_rate=None):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params["frame_sampling_rate"] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params["num_frames"] = num_frames

        postprocess_params = {}
        if top_k is not None:
            postprocess_params["top_k"] = top_k
        return preprocess_params, {}, postprocess_params

    def __call__(self, videos, **kwargs):
        return super().__call__(videos, **kwargs)

    def preprocess(self, video, num_frames=None, frame_sampling_rate=1):
        if num_frames is None:
            num_frames = self.model.config.num_frames

        if video.startswith("http://") or video.startswith("https://"):
            video = BytesIO(requests.get(video).content)

        videoreader = VideoReader(video)
        videoreader.seek(0)

        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        indices = np.linspace(start_idx, end_idx, num=num_frames, dtype=np.int64)

        video = videoreader.get_batch(indices).asnumpy()
        video = list(video)

        model_inputs = self.image_processor(video, return_tensors=self.framework)
        return model_inputs

    def _forward(self, model_inputs):
        model_outputs = self.model(**model_inputs)
        return model_outputs

    def postprocess(self, model_outputs, top_k=5):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels

        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1)[0]
            scores, ids = probs.topk(top_k)
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores, ids)]
| 48 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm"] = ["LayoutXLMTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_layoutxlm_fast"] = ["LayoutXLMTokenizerFast"]
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 48 | 1 |
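# --- Usage note (editor's addition) ---
# With the lazy module above in place, the heavy dependencies load only on
# first access:
#
#     from transformers import LayoutXLMProcessor      # needs sentencepiece
#     from transformers import LayoutXLMTokenizerFast  # needs tokenizers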
import math
import os
import sys
def read_file_binary(file_path: str) -> str:
    # Reads the given file as bytes and returns them as one long bit string.
    result = ""
    try:
        with open(file_path, "rb") as binary_file:
            data = binary_file.read()
        for dat in data:
            curr_byte = f"{dat:08b}"
            result += curr_byte
        return result
    except OSError:
        print("File not accessible")
        sys.exit()


def add_key_to_lexicon(lexicon: dict, curr_string: str, index: int, last_match_id: str) -> None:
    # Replaces curr_string with its two one-bit extensions, widening all codes
    # whenever the lexicon size crosses a power of two.
    lexicon.pop(curr_string)
    lexicon[curr_string + "0"] = last_match_id

    if math.log2(index).is_integer():
        for curr_key in lexicon:
            lexicon[curr_key] = "0" + lexicon[curr_key]

    lexicon[curr_string + "1"] = bin(index)[2:]


def compress_data(data_bits: str) -> str:
    # Compresses the given bit string with a Lempel-Ziv-style scheme.
    lexicon = {"0": "0", "1": "1"}
    result, curr_string = "", ""
    index = len(lexicon)

    for i in range(len(data_bits)):
        curr_string += data_bits[i]
        if curr_string not in lexicon:
            continue

        last_match_id = lexicon[curr_string]
        result += last_match_id
        add_key_to_lexicon(lexicon, curr_string, index, last_match_id)
        index += 1
        curr_string = ""

    while curr_string != "" and curr_string not in lexicon:
        curr_string += "0"

    if curr_string != "":
        last_match_id = lexicon[curr_string]
        result += last_match_id

    return result


def add_file_length(source_path: str, compressed: str) -> str:
    # Prepends the source file's length (Elias-gamma style) to the compressed bits.
    file_length = os.path.getsize(source_path)
    file_length_binary = bin(file_length)[2:]
    length_length = len(file_length_binary)

    return "0" * (length_length - 1) + file_length_binary + compressed


def write_file_binary(file_path: str, to_write: str) -> None:
    # Writes the given 0/1 string as bytes, with a stop-bit-padded final byte.
    byte_length = 8
    try:
        with open(file_path, "wb") as opened_file:
            result_byte_array = [
                to_write[i : i + byte_length]
                for i in range(0, len(to_write), byte_length)
            ]

            if len(result_byte_array[-1]) % byte_length == 0:
                result_byte_array.append("10000000")
            else:
                result_byte_array[-1] += "1" + "0" * (
                    byte_length - len(result_byte_array[-1]) - 1
                )

            for elem in result_byte_array:
                opened_file.write(int(elem, 2).to_bytes(1, byteorder="big"))
    except OSError:
        print("File not accessible")
        sys.exit()


def compress(source_path: str, destination_path: str) -> None:
    data_bits = read_file_binary(source_path)
    compressed = compress_data(data_bits)
    compressed = add_file_length(source_path, compressed)
    write_file_binary(destination_path, compressed)


if __name__ == "__main__":
    compress(sys.argv[1], sys.argv[2])
| 368 |
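The compressor above works on a bit string: `read_file_binary` turns each byte into 8 bits, the LZW-style loop grows a lexicon over that string, and `write_file_binary` pads the output with a single `1` followed by zeros so the padding can later be stripped unambiguously. A small in-memory sketch of that framing; the function and variable names here (`to_bits`, `pad_to_bytes`, `payload`) are illustrative, not from the source.

```python
# Hedged sketch of the bit-level framing used above, on an in-memory payload
# instead of a file.
def to_bits(payload: bytes) -> str:
    # same framing as read_file_binary: each byte becomes 8 bits, MSB first
    return "".join(f"{b:08b}" for b in payload)

def pad_to_bytes(bitstring: str, byte_length: int = 8) -> list[str]:
    # same rule as write_file_binary: a lone "1" marks where padding starts,
    # so a decoder can strip the trailing "1000...0" unambiguously
    chunks = [bitstring[i : i + byte_length] for i in range(0, len(bitstring), byte_length)]
    if len(chunks[-1]) % byte_length == 0:
        chunks.append("10000000")
    else:
        chunks[-1] += "1" + "0" * (byte_length - len(chunks[-1]) - 1)
    return chunks

bits = to_bits(b"AB")        # '0100000101000010'
print(pad_to_bytes(bits))    # ['01000001', '01000010', '10000000']
```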
from __future__ import annotations
import math
def SCREAMING_SNAKE_CASE__ ( __a , __a ):
snake_case_ : Optional[int] = u
for i in range(1 , __a ):
snake_case_ : Optional[Any] = temp * (u - i)
return temp
def SCREAMING_SNAKE_CASE__ ( ):
snake_case_ : Dict = int(input('enter the numbers of values: ' ) )
snake_case_ : list[list[float]] = []
for _ in range(__a ):
y.append([] )
for i in range(__a ):
for j in range(__a ):
y[i].append(__a )
snake_case_ : str = 0
print('enter the values of parameters in a list: ' )
snake_case_ : int = list(map(__a , input().split() ) )
print('enter the values of corresponding parameters: ' )
for i in range(__a ):
snake_case_ : Union[str, Any] = float(input() )
snake_case_ : int = int(input('enter the value to interpolate: ' ) )
snake_case_ : List[Any] = (value - x[0]) / (x[1] - x[0])
# for calculating forward difference table
for i in range(1 , __a ):
for j in range(n - i ):
snake_case_ : int = y[j + 1][i - 1] - y[j][i - 1]
snake_case_ : str = y[0][0]
for i in range(1 , __a ):
summ += (ucal(__a , __a ) * y[0][i]) / math.factorial(__a )
print(f"""the value at {value} is {summ}""" )
if __name__ == "__main__":
main()
| 88 | 0 |
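The cell above is Newton's forward-difference interpolation: build the difference table, compute `u = (value - x0) / h`, and sum the terms `u(u-1)...(u-k+1) * Δ^k y_0 / k!`. Since the original reads its input interactively, here is a self-contained restatement with readable names and a made-up data set (illustrative, not from the source).

```python
# Hedged re-statement of the scheme above; assumes equally spaced x values.
from math import factorial

def newton_forward(x: list[float], y: list[float], value: float) -> float:
    n = len(x)
    # forward-difference table: diff[i][j] holds the j-th difference at x_i
    diff = [[yi] + [0.0] * (n - 1) for yi in y]
    for j in range(1, n):
        for i in range(n - j):
            diff[i][j] = diff[i + 1][j - 1] - diff[i][j - 1]
    u = (value - x[0]) / (x[1] - x[0])
    total, u_term = diff[0][0], 1.0
    for j in range(1, n):
        u_term *= u - (j - 1)                      # u(u-1)...(u-j+1)
        total += u_term * diff[0][j] / factorial(j)
    return total

# f(x) = x^2 sampled at 0..3 is interpolated exactly: f(1.5) = 2.25
print(newton_forward([0, 1, 2, 3], [0, 1, 4, 9], 1.5))
```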
'''simple docstring'''
def _UpperCamelCase ( UpperCamelCase__ ):
if n_term == "":
return []
UpperCAmelCase__ : list = []
for temp in range(int(UpperCamelCase__ ) ):
series.append(f'''1/{temp + 1}''' if series else """1""" )
return series
if __name__ == "__main__":
__A =input('Enter the last number (nth term) of the Harmonic Series')
print('Formula of Harmonic Series => 1+1/2+1/3 ..... 1/n')
print(harmonic_series(nth_term)) | 163 |
'''simple docstring'''
from collections import namedtuple
import requests
from lxml import html # type: ignore
__A =namedtuple('covid_data', 'cases deaths recovered')
def _UpperCamelCase ( UpperCamelCase__ = "https://www.worldometers.info/coronavirus/" ):
UpperCAmelCase__ : Union[str, Any] = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(UpperCamelCase__ ).content ).xpath(UpperCamelCase__ ) )
__A ='Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'
print(fmt.format(*covid_stats())) | 163 | 1 |
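The scraper above pulls the three `maincounter-number` spans from Worldometer with one XPath expression. Below is an offline sketch of that XPath against a static fragment, so it runs without network access; the HTML mirrors the structure the scraper expects, not a live page.

```python
# Hedged sketch: static HTML stands in for the live page.
from lxml import html

page = html.fromstring(
    '<div class="maincounter-number"><span>1</span></div>'
    '<div class="maincounter-number"><span>2</span></div>'
    '<div class="maincounter-number"><span>3</span></div>'
)
print(page.xpath('//div[@class = "maincounter-number"]/span/text()'))  # ['1', '2', '3']
```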
'''simple docstring'''
def UpperCamelCase ( a , a , a ) -> bool:
'''simple docstring'''
return not any(
neighbour == 1 and colored_vertices[i] == color
for i, neighbour in enumerate(a ) )
def UpperCamelCase ( a , a , a , a ) -> bool:
'''simple docstring'''
# Base Case
if index == len(a ):
return True
# Recursive Step
for i in range(a ):
if valid_coloring(graph[index] , a , a ):
# Color current vertex
__magic_name__ = i
# Validate coloring
if util_color(a , a , a , index + 1 ):
return True
# Backtrack
__magic_name__ = -1
return False
def UpperCamelCase ( a , a ) -> list[int]:
'''simple docstring'''
__magic_name__ = [-1] * len(a )
if util_color(a , a , a , 0 ):
return colored_vertices
return []
| 368 |
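The cell above is backtracking m-coloring: try each color on the current vertex, recurse, and reset to -1 on failure. Because the names in the cell are mangled, here is a compact self-contained restatement with readable names; the 4-cycle adjacency matrix is illustrative.

```python
# Hedged sketch of the same backtracking scheme.
def graph_coloring(graph: list[list[int]], max_colors: int) -> list[int]:
    colors = [-1] * len(graph)

    def ok(vertex: int, color: int) -> bool:
        # a color is valid if no already-colored neighbour uses it
        return all(not (adj and colors[nbr] == color)
                   for nbr, adj in enumerate(graph[vertex]))

    def solve(vertex: int) -> bool:
        if vertex == len(graph):
            return True
        for color in range(max_colors):
            if ok(vertex, color):
                colors[vertex] = color
                if solve(vertex + 1):
                    return True
                colors[vertex] = -1   # backtrack
        return False

    return colors if solve(0) else []

cycle4 = [[0, 1, 0, 1], [1, 0, 1, 0], [0, 1, 0, 1], [1, 0, 1, 0]]
print(graph_coloring(cycle4, 2))   # a 4-cycle is bipartite: e.g. [0, 1, 0, 1]
```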
'''simple docstring'''
import functools
def UpperCamelCase ( a , a ) -> int:
'''simple docstring'''
__magic_name__ = len(a )
__magic_name__ = len(a )
@functools.cache
def min_distance(a , a ) -> int:
        # the first word is exhausted: the remaining characters of the second word must be deleted
if indexa >= len_worda:
return len_worda - indexa
        # the second word is exhausted: the remaining characters of the first word must be deleted
if indexa >= len_worda:
return len_worda - indexa
__magic_name__ = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , a ) , 1 + min_distance(a , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 98 | 0 |
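The cell above computes edit distance with a memoized recursion over two indices (`functools.cache` makes it O(len1 * len2)). A quick restatement with readable names and a classic word pair, illustrative only.

```python
# Hedged sketch of the memoized recursion above.
import functools

def edit_distance(a: str, b: str) -> int:
    @functools.cache
    def go(i: int, j: int) -> int:
        if i >= len(a):                 # a exhausted: insert the rest of b
            return len(b) - j
        if j >= len(b):                 # b exhausted: delete the rest of a
            return len(a) - i
        sub = int(a[i] != b[j])         # 0 if letters match, else 1
        return min(1 + go(i + 1, j),        # delete from a
                   1 + go(i, j + 1),        # insert into a
                   sub + go(i + 1, j + 1))  # substitute / keep
    return go(0, 0)

print(edit_distance("kitten", "sitting"))  # classic example: 3
```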
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[Any] =logging.get_logger(__name__)
UpperCAmelCase : Any ={
"""microsoft/wavlm-base""": """https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json""",
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowercase (a_ ):
'''simple docstring'''
lowercase__ = """wavlm"""
def __init__( self , snake_case__=32 , snake_case__=768 , snake_case__=12 , snake_case__=12 , snake_case__=3072 , snake_case__="gelu" , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.0 , snake_case__=0.1 , snake_case__=0.1 , snake_case__=0.02 , snake_case__=1e-5 , snake_case__="group" , snake_case__="gelu" , snake_case__=(512, 512, 512, 512, 512, 512, 512) , snake_case__=(5, 2, 2, 2, 2, 2, 2) , snake_case__=(10, 3, 3, 3, 3, 2, 2) , snake_case__=False , snake_case__=128 , snake_case__=16 , snake_case__=320 , snake_case__=800 , snake_case__=False , snake_case__=True , snake_case__=0.05 , snake_case__=10 , snake_case__=2 , snake_case__=0.0 , snake_case__=10 , snake_case__=320 , snake_case__=2 , snake_case__=0.1 , snake_case__=100 , snake_case__=256 , snake_case__=256 , snake_case__=0.1 , snake_case__="mean" , snake_case__=False , snake_case__=False , snake_case__=256 , snake_case__=(512, 512, 512, 512, 1500) , snake_case__=(5, 3, 3, 1, 1) , snake_case__=(1, 2, 3, 1, 1) , snake_case__=512 , snake_case__=80 , snake_case__=0 , snake_case__=1 , snake_case__=2 , snake_case__=False , snake_case__=3 , snake_case__=2 , snake_case__=3 , snake_case__=None , **snake_case__ , ):
'''simple docstring'''
super().__init__(**snake_case__ , pad_token_id=snake_case__ , bos_token_id=snake_case__ , eos_token_id=snake_case__ )
UpperCamelCase_ = hidden_size
UpperCamelCase_ = feat_extract_norm
UpperCamelCase_ = feat_extract_activation
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = conv_bias
UpperCamelCase_ = num_buckets
UpperCamelCase_ = max_bucket_distance
UpperCamelCase_ = num_conv_pos_embeddings
UpperCamelCase_ = num_conv_pos_embedding_groups
UpperCamelCase_ = len(self.conv_dim )
UpperCamelCase_ = num_hidden_layers
UpperCamelCase_ = intermediate_size
UpperCamelCase_ = hidden_act
UpperCamelCase_ = num_attention_heads
UpperCamelCase_ = hidden_dropout
UpperCamelCase_ = attention_dropout
UpperCamelCase_ = activation_dropout
UpperCamelCase_ = feat_proj_dropout
UpperCamelCase_ = final_dropout
UpperCamelCase_ = layerdrop
UpperCamelCase_ = layer_norm_eps
UpperCamelCase_ = initializer_range
UpperCamelCase_ = num_ctc_classes
UpperCamelCase_ = vocab_size
UpperCamelCase_ = do_stable_layer_norm
UpperCamelCase_ = use_weighted_layer_sum
UpperCamelCase_ = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
F""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
F""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
UpperCamelCase_ = apply_spec_augment
UpperCamelCase_ = mask_time_prob
UpperCamelCase_ = mask_time_length
UpperCamelCase_ = mask_time_min_masks
UpperCamelCase_ = mask_feature_prob
UpperCamelCase_ = mask_feature_length
# parameters for pretraining with codevector quantized representations
UpperCamelCase_ = num_codevectors_per_group
UpperCamelCase_ = num_codevector_groups
UpperCamelCase_ = contrastive_logits_temperature
UpperCamelCase_ = num_negatives
UpperCamelCase_ = codevector_dim
UpperCamelCase_ = proj_codevector_dim
UpperCamelCase_ = diversity_loss_weight
# ctc loss
UpperCamelCase_ = ctc_loss_reduction
UpperCamelCase_ = ctc_zero_infinity
# adapter
UpperCamelCase_ = add_adapter
UpperCamelCase_ = adapter_kernel_size
UpperCamelCase_ = adapter_stride
UpperCamelCase_ = num_adapter_layers
UpperCamelCase_ = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
UpperCamelCase_ = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = list(snake_case__ )
UpperCamelCase_ = xvector_output_dim
@property
def _lowerCamelCase ( self ):
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
| 128 |
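A short usage sketch for the configuration class above, assuming the public `transformers` API; the values passed are simply the defaults visible in the signature above, made explicit.

```python
# Hedged sketch: assumes transformers' WavLMConfig / WavLMModel.
from transformers import WavLMConfig, WavLMModel

config = WavLMConfig(hidden_size=768, num_hidden_layers=12)  # defaults shown explicitly
model = WavLMModel(config)              # randomly initialised, architecture only
print(config.num_feat_extract_layers)   # derived from len(conv_dim): 7
```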
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
UpperCAmelCase : Tuple =_symbol_database.Default()
UpperCAmelCase : List[Any] =_descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
UpperCAmelCase : Optional[int] =globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
UpperCAmelCase : str =None
UpperCAmelCase : List[Any] =b"""H\003"""
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
UpperCAmelCase : str =45
UpperCAmelCase : Optional[Any] =1581
UpperCAmelCase : Dict =1517
UpperCAmelCase : str =1570
UpperCAmelCase : Optional[int] =1584
UpperCAmelCase : str =1793
UpperCAmelCase : Any =1795
UpperCAmelCase : Dict =1916
UpperCAmelCase : str =1864
UpperCAmelCase : Dict =1905
UpperCAmelCase : Union[str, Any] =1919
UpperCAmelCase : Any =2429
UpperCAmelCase : Dict =2208
UpperCAmelCase : int =2418
UpperCAmelCase : str =2323
UpperCAmelCase : Any =2407
# @@protoc_insertion_point(module_scope)
| 128 | 1 |
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from accelerate import PartialState
from accelerate.utils.operations import broadcast, gather, gather_object, pad_across_processes, reduce
def lowerCAmelCase__ ( a__: Dict ) -> str:
'''simple docstring'''
return (torch.arange(state.num_processes ) + 1.0 + (state.num_processes * state.process_index)).to(state.device )
def lowerCAmelCase__ ( a__: Union[str, Any] ) -> Any:
'''simple docstring'''
_UpperCAmelCase = create_tensor(a__ )
_UpperCAmelCase = gather(a__ )
assert gathered_tensor.tolist() == list(range(1 , state.num_processes**2 + 1 ) )
def lowerCAmelCase__ ( a__: Dict ) -> Tuple:
'''simple docstring'''
_UpperCAmelCase = [state.process_index]
_UpperCAmelCase = gather_object(a__ )
assert len(a__ ) == state.num_processes, F'''{gathered_obj}, {len(a__ )} != {state.num_processes}'''
assert gathered_obj == list(range(state.num_processes ) ), F'''{gathered_obj} != {list(range(state.num_processes ) )}'''
def lowerCAmelCase__ ( a__: Any ) -> int:
'''simple docstring'''
_UpperCAmelCase = create_tensor(a__ )
_UpperCAmelCase = broadcast(a__ )
assert broadcasted_tensor.shape == torch.Size([state.num_processes] )
assert broadcasted_tensor.tolist() == list(range(1 , state.num_processes + 1 ) )
def lowerCAmelCase__ ( a__: List[str] ) -> Dict:
'''simple docstring'''
if state.is_main_process:
_UpperCAmelCase = torch.arange(state.num_processes + 1 ).to(state.device )
else:
_UpperCAmelCase = torch.arange(state.num_processes ).to(state.device )
_UpperCAmelCase = pad_across_processes(a__ )
assert padded_tensor.shape == torch.Size([state.num_processes + 1] )
if not state.is_main_process:
assert padded_tensor.tolist() == list(range(0 , state.num_processes ) ) + [0]
def lowerCAmelCase__ ( a__: Optional[Any] ) -> str:
'''simple docstring'''
if state.num_processes != 2:
return
_UpperCAmelCase = create_tensor(a__ )
_UpperCAmelCase = reduce(a__ , 'sum' )
_UpperCAmelCase = torch.tensor([4.0, 6] ).to(state.device )
assert torch.allclose(a__ , a__ ), F'''{reduced_tensor} != {truth_tensor}'''
def lowerCAmelCase__ ( a__: Any ) -> Any:
'''simple docstring'''
if state.num_processes != 2:
return
_UpperCAmelCase = create_tensor(a__ )
_UpperCAmelCase = reduce(a__ , 'mean' )
_UpperCAmelCase = torch.tensor([2.0, 3] ).to(state.device )
assert torch.allclose(a__ , a__ ), F'''{reduced_tensor} != {truth_tensor}'''
def lowerCAmelCase__ ( a__: str ) -> Optional[int]:
'''simple docstring'''
main()
def lowerCAmelCase__ ( ) -> Dict:
'''simple docstring'''
_UpperCAmelCase = PartialState()
state.print(F'''State: {state}''' )
state.print('testing gather' )
test_gather(a__ )
state.print('testing gather_object' )
test_gather_object(a__ )
state.print('testing broadcast' )
test_broadcast(a__ )
state.print('testing pad_across_processes' )
test_pad_across_processes(a__ )
state.print('testing reduce_sum' )
test_reduce_sum(a__ )
state.print('testing reduce_mean' )
test_reduce_mean(a__ )
if __name__ == "__main__":
main()
| 185 |
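The tests above are meant to run under `accelerate launch` with several processes; the key invariant is the tensor layout from the first helper: process `r` of `N` holds `[N*r + 1, ..., N*r + N]`, so gathering across ranks yields `1..N^2`. An offline single-process sanity check of that layout, with an illustrative process count and no distributed launch required:

```python
# Hedged sketch: simulates the per-rank tensors in one process.
import torch

num_processes = 3
per_rank = [torch.arange(num_processes) + 1.0 + num_processes * rank
            for rank in range(num_processes)]
gathered = torch.cat(per_rank)   # what gather() would return across ranks
assert gathered.tolist() == list(range(1, num_processes**2 + 1))
print(gathered)                  # tensor([1., 2., ..., 9.])
```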
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCAmelCase__ :Dict = pd.read_csv('''sample_data.csv''', header=None)
lowerCAmelCase__ :int = df.shape[:1][0]
    # If you're using some other dataset, change this slice to select the target column
lowerCAmelCase__ :Union[str, Any] = df.iloc[:, 1:2]
lowerCAmelCase__ :Optional[int] = actual_data.values.reshape(len_data, 1)
lowerCAmelCase__ :Tuple = MinMaxScaler().fit_transform(actual_data)
lowerCAmelCase__ :str = 1_0
lowerCAmelCase__ :Optional[Any] = 5
lowerCAmelCase__ :List[str] = 2_0
lowerCAmelCase__ :Any = len_data - periods * look_back
lowerCAmelCase__ :Union[str, Any] = actual_data[:division]
lowerCAmelCase__ :Tuple = actual_data[division - look_back :]
lowerCAmelCase__ , lowerCAmelCase__ :Optional[int] = [], []
lowerCAmelCase__ , lowerCAmelCase__ :str = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCAmelCase__ :Optional[Any] = np.array(train_x)
lowerCAmelCase__ :Any = np.array(test_x)
lowerCAmelCase__ :Dict = np.array([list(i.ravel()) for i in train_y])
lowerCAmelCase__ :Tuple = np.array([list(i.ravel()) for i in test_y])
lowerCAmelCase__ :Optional[int] = Sequential()
model.add(LSTM(1_2_8, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(6_4, input_shape=(1_2_8, 1)))
model.add(Dense(forward_days))
model.compile(loss='''mean_squared_error''', optimizer='''adam''')
lowerCAmelCase__ :List[Any] = model.fit(
x_train, y_train, epochs=1_5_0, verbose=1, shuffle=True, batch_size=4
)
lowerCAmelCase__ :Optional[Any] = model.predict(x_test)
| 185 | 1 |
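The windowing loops above turn a univariate series into supervised pairs: each sample is `look_back` consecutive points and each target the next `forward_days` points. A readable restatement on synthetic data; the names and sizes here are illustrative.

```python
# Hedged sketch of the sliding-window construction above.
import numpy as np

def make_windows(series: np.ndarray, look_back: int, forward_days: int):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])                            # inputs
        ys.append(series[i + look_back : i + look_back + forward_days]) # targets
    return np.array(xs), np.array(ys)

series = np.arange(10, dtype=float)
x, y = make_windows(series, look_back=4, forward_days=2)
print(x.shape, y.shape)   # (5, 4) (5, 2)
print(x[0], y[0])         # [0. 1. 2. 3.] [4. 5.]
```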
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
A__ : Dict = {
'configuration_bigbird_pegasus': [
'BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP',
'BigBirdPegasusConfig',
'BigBirdPegasusOnnxConfig',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__ : List[Any] = [
'BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST',
'BigBirdPegasusForCausalLM',
'BigBirdPegasusForConditionalGeneration',
'BigBirdPegasusForQuestionAnswering',
'BigBirdPegasusForSequenceClassification',
'BigBirdPegasusModel',
'BigBirdPegasusPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_CONFIG_ARCHIVE_MAP,
BigBirdPegasusConfig,
BigBirdPegasusOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_bigbird_pegasus import (
BIGBIRD_PEGASUS_PRETRAINED_MODEL_ARCHIVE_LIST,
BigBirdPegasusForCausalLM,
BigBirdPegasusForConditionalGeneration,
BigBirdPegasusForQuestionAnswering,
BigBirdPegasusForSequenceClassification,
BigBirdPegasusModel,
BigBirdPegasusPreTrainedModel,
)
else:
import sys
A__ : List[Any] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 144 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class _SCREAMING_SNAKE_CASE( A , unittest.TestCase ):
SCREAMING_SNAKE_CASE_ : Optional[Any] = ConsistencyModelPipeline
SCREAMING_SNAKE_CASE_ : Any = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
SCREAMING_SNAKE_CASE_ : Dict = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
SCREAMING_SNAKE_CASE_ : Optional[Any] = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''output_type''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
@property
def _UpperCamelCase ( self ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet''' ,)
return unet
@property
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Optional[int] = UNetaDModel.from_pretrained(
'''diffusers/consistency-models-test''' ,subfolder='''test_unet_class_cond''' ,)
return unet
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=False ) -> Union[str, Any]:
"""simple docstring"""
if class_cond:
__SCREAMING_SNAKE_CASE :str = self.dummy_cond_unet
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = self.dummy_uncond_unet
# Default to CM multistep sampler
__SCREAMING_SNAKE_CASE :List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,)
__SCREAMING_SNAKE_CASE :List[str] = {
'''unet''': unet,
'''scheduler''': scheduler,
}
return components
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__ ,SCREAMING_SNAKE_CASE__=0 ) -> Dict:
"""simple docstring"""
if str(SCREAMING_SNAKE_CASE__ ).startswith('''mps''' ):
__SCREAMING_SNAKE_CASE :Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
else:
__SCREAMING_SNAKE_CASE :Optional[Any] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = {
'''batch_size''': 1,
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''generator''': generator,
'''output_type''': '''np''',
}
return inputs
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE :List[str] = self.get_dummy_components()
__SCREAMING_SNAKE_CASE :Optional[Any] = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :Any = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Union[str, Any] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE :List[Any] = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Union[str, Any] = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = 0
__SCREAMING_SNAKE_CASE :Optional[int] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE :Dict = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :List[Any] = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[str] = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE :Tuple = self.get_dummy_components()
__SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :List[str] = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = 1
__SCREAMING_SNAKE_CASE :List[str] = None
__SCREAMING_SNAKE_CASE :List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :int = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = '''cpu''' # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE :Any = self.get_dummy_components(class_cond=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = ConsistencyModelPipeline(**SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = pipe.to(SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = self.get_dummy_inputs(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = 1
__SCREAMING_SNAKE_CASE :Optional[Any] = None
__SCREAMING_SNAKE_CASE :List[Any] = 0
__SCREAMING_SNAKE_CASE :Any = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 32, 32, 3)
__SCREAMING_SNAKE_CASE :int = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class _SCREAMING_SNAKE_CASE( unittest.TestCase ):
def _UpperCamelCase ( self ) -> List[str]:
"""simple docstring"""
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__=False ,SCREAMING_SNAKE_CASE__="cpu" ,SCREAMING_SNAKE_CASE__=torch.floataa ,SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :Tuple = torch.manual_seed(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Tuple = {
'''num_inference_steps''': None,
'''timesteps''': [22, 0],
'''class_labels''': 0,
'''generator''': generator,
'''output_type''': '''np''',
}
if get_fixed_latents:
__SCREAMING_SNAKE_CASE :int = self.get_fixed_latents(seed=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ ,shape=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = latents
return inputs
def _UpperCamelCase ( self ,SCREAMING_SNAKE_CASE__=0 ,SCREAMING_SNAKE_CASE__="cpu" ,SCREAMING_SNAKE_CASE__=torch.floataa ,SCREAMING_SNAKE_CASE__=(1, 3, 64, 64) ) -> int:
"""simple docstring"""
if type(SCREAMING_SNAKE_CASE__ ) == str:
__SCREAMING_SNAKE_CASE :int = torch.device(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = torch.Generator(device=SCREAMING_SNAKE_CASE__ ).manual_seed(SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = randn_tensor(SCREAMING_SNAKE_CASE__ ,generator=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ ,dtype=SCREAMING_SNAKE_CASE__ )
return latents
def _UpperCamelCase ( self ) -> Any:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE :List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,)
__SCREAMING_SNAKE_CASE :Dict = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = self.get_inputs()
__SCREAMING_SNAKE_CASE :List[str] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE :Union[str, Any] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :Dict = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE :List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,)
__SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Optional[int] = self.get_inputs()
__SCREAMING_SNAKE_CASE :int = 1
__SCREAMING_SNAKE_CASE :int = None
__SCREAMING_SNAKE_CASE :Union[str, Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE :str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :List[str] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _UpperCamelCase ( self ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :List[Any] = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE :Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,)
__SCREAMING_SNAKE_CASE :Any = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :str = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :Optional[Any] = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE :List[str] = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :List[Any] = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _UpperCamelCase ( self ) -> List[Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE :str = UNetaDModel.from_pretrained('''diffusers/consistency_models''' ,subfolder='''diffusers_cd_imagenet64_l2''' )
__SCREAMING_SNAKE_CASE :Dict = CMStochasticIterativeScheduler(
num_train_timesteps=40 ,sigma_min=0.0_0_2 ,sigma_max=8_0.0 ,)
__SCREAMING_SNAKE_CASE :int = ConsistencyModelPipeline(unet=SCREAMING_SNAKE_CASE__ ,scheduler=SCREAMING_SNAKE_CASE__ )
pipe.to(torch_device=SCREAMING_SNAKE_CASE__ ,torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :Dict = self.get_inputs(get_fixed_latents=SCREAMING_SNAKE_CASE__ ,device=SCREAMING_SNAKE_CASE__ )
__SCREAMING_SNAKE_CASE :int = 1
__SCREAMING_SNAKE_CASE :int = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=SCREAMING_SNAKE_CASE__ ,enable_math=SCREAMING_SNAKE_CASE__ ,enable_mem_efficient=SCREAMING_SNAKE_CASE__ ):
__SCREAMING_SNAKE_CASE :str = pipe(**SCREAMING_SNAKE_CASE__ ).images
assert image.shape == (1, 64, 64, 3)
__SCREAMING_SNAKE_CASE :str = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE :Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 191 | 0 |
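A condensed usage sketch of the pipeline exercised by the slow tests above (the mangled `UNetaDModel` corresponds to diffusers' `UNet2DModel`). The checkpoint and subfolder names are copied from the test code; treat everything else, including the device, as an assumption.

```python
# Hedged sketch: mirrors the slow tests, requires a CUDA device and the
# public diffusers API.
import torch
from diffusers import CMStochasticIterativeScheduler, ConsistencyModelPipeline, UNet2DModel

unet = UNet2DModel.from_pretrained("diffusers/consistency_models",
                                   subfolder="diffusers_cd_imagenet64_l2")
scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40,
                                           sigma_min=0.002, sigma_max=80.0)
pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler).to("cuda")

# multistep sampling over two explicit timesteps, as in the tests
image = pipe(num_inference_steps=None, timesteps=[22, 0], class_labels=0,
             generator=torch.manual_seed(0)).images[0]
# one-step sampling instead: num_inference_steps=1, timesteps=None
```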
from typing import Callable, List, Optional, Tuple, Union
import torch
from transformers import CLIPTextModel, CLIPTokenizer
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin, TransformeraDModel, VQModel
from ...schedulers import VQDiffusionScheduler
from ...utils import logging
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
_UpperCAmelCase : List[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class __lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase):
@register_to_config
def __init__( self: Any , _lowerCAmelCase: bool , _lowerCAmelCase: Optional[int] = None , _lowerCAmelCase: Optional[int] = None ):
super().__init__()
lowercase :Union[str, Any] = learnable
if self.learnable:
assert hidden_size is not None, "learnable=True requires `hidden_size` to be set"
assert length is not None, "learnable=True requires `length` to be set"
lowercase :Optional[int] = torch.zeros(_lowerCAmelCase , _lowerCAmelCase )
else:
lowercase :Any = None
lowercase :List[Any] = torch.nn.Parameter(_lowerCAmelCase )
class __lowerCAmelCase ( lowerCAmelCase):
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
_a = 42
def __init__( self: Any , _lowerCAmelCase: VQModel , _lowerCAmelCase: CLIPTextModel , _lowerCAmelCase: CLIPTokenizer , _lowerCAmelCase: TransformeraDModel , _lowerCAmelCase: VQDiffusionScheduler , _lowerCAmelCase: LearnedClassifierFreeSamplingEmbeddings , ):
super().__init__()
self.register_modules(
vqvae=_lowerCAmelCase , transformer=_lowerCAmelCase , text_encoder=_lowerCAmelCase , tokenizer=_lowerCAmelCase , scheduler=_lowerCAmelCase , learned_classifier_free_sampling_embeddings=_lowerCAmelCase , )
def SCREAMING_SNAKE_CASE ( self: str , _lowerCAmelCase: Tuple , _lowerCAmelCase: List[Any] , _lowerCAmelCase: Dict ):
lowercase :Union[str, Any] = len(_lowerCAmelCase ) if isinstance(_lowerCAmelCase , _lowerCAmelCase ) else 1
# get prompt text embeddings
lowercase :Tuple = self.tokenizer(
_lowerCAmelCase , padding="max_length" , max_length=self.tokenizer.model_max_length , return_tensors="pt" , )
lowercase :Any = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
lowercase :Dict = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"The following part of your input was truncated because CLIP can only handle sequences up to"
F" {self.tokenizer.model_max_length} tokens: {removed_text}" )
lowercase :str = text_input_ids[:, : self.tokenizer.model_max_length]
lowercase :Union[str, Any] = self.text_encoder(text_input_ids.to(self.device ) )[0]
# NOTE: This additional step of normalizing the text embeddings is from VQ-Diffusion.
# While CLIP does normalize the pooled output of the text transformer when combining
# the image and text embeddings, CLIP does not directly normalize the last hidden state.
#
# CLIP normalizing the pooled output.
# https://github.com/huggingface/transformers/blob/d92e22d1f28324f513f3080e5c47c071a3916721/src/transformers/models/clip/modeling_clip.py#L1052-L1053
lowercase :List[Any] = prompt_embeds / prompt_embeds.norm(dim=-1 , keepdim=_lowerCAmelCase )
# duplicate text embeddings for each generation per prompt
lowercase :List[Any] = prompt_embeds.repeat_interleave(_lowerCAmelCase , dim=0 )
if do_classifier_free_guidance:
if self.learned_classifier_free_sampling_embeddings.learnable:
lowercase :Union[str, Any] = self.learned_classifier_free_sampling_embeddings.embeddings
lowercase :int = negative_prompt_embeds.unsqueeze(0 ).repeat(_lowerCAmelCase , 1 , 1 )
else:
lowercase :Optional[int] = [""] * batch_size
lowercase :Any = text_input_ids.shape[-1]
lowercase :int = self.tokenizer(
_lowerCAmelCase , padding="max_length" , max_length=_lowerCAmelCase , truncation=_lowerCAmelCase , return_tensors="pt" , )
lowercase :List[str] = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# See comment for normalizing text embeddings
lowercase :Optional[Any] = negative_prompt_embeds / negative_prompt_embeds.norm(dim=-1 , keepdim=_lowerCAmelCase )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
lowercase :Any = negative_prompt_embeds.shape[1]
lowercase :str = negative_prompt_embeds.repeat(1 , _lowerCAmelCase , 1 )
lowercase :Union[str, Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _lowerCAmelCase , -1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
lowercase :Any = torch.cat([negative_prompt_embeds, prompt_embeds] )
return prompt_embeds
@torch.no_grad()
def __call__( self: Tuple , _lowerCAmelCase: Union[str, List[str]] , _lowerCAmelCase: int = 1_00 , _lowerCAmelCase: float = 5.0 , _lowerCAmelCase: float = 1.0 , _lowerCAmelCase: int = 1 , _lowerCAmelCase: Optional[Union[torch.Generator, List[torch.Generator]]] = None , _lowerCAmelCase: Optional[torch.FloatTensor] = None , _lowerCAmelCase: Optional[str] = "pil" , _lowerCAmelCase: bool = True , _lowerCAmelCase: Optional[Callable[[int, int, torch.FloatTensor], None]] = None , _lowerCAmelCase: int = 1 , ):
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Any = 1
elif isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Any = len(_lowerCAmelCase )
else:
raise ValueError(F"`prompt` has to be of type `str` or `list` but is {type(_lowerCAmelCase )}" )
lowercase :List[str] = batch_size * num_images_per_prompt
lowercase :Optional[Any] = guidance_scale > 1.0
lowercase :Union[str, Any] = self._encode_prompt(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(_lowerCAmelCase , _lowerCAmelCase ) or callback_steps <= 0)
):
raise ValueError(
F"`callback_steps` has to be a positive integer but is {callback_steps} of type"
F" {type(_lowerCAmelCase )}." )
# get the initial completely masked latents unless the user supplied it
lowercase :Dict = (batch_size, self.transformer.num_latent_pixels)
if latents is None:
lowercase :Union[str, Any] = self.transformer.num_vector_embeds - 1
lowercase :Union[str, Any] = torch.full(_lowerCAmelCase , _lowerCAmelCase ).to(self.device )
else:
if latents.shape != latents_shape:
raise ValueError(F"Unexpected latents shape, got {latents.shape}, expected {latents_shape}" )
if (latents < 0).any() or (latents >= self.transformer.num_vector_embeds).any():
raise ValueError(
"Unexpected latents value(s). All latents be valid embedding indices i.e. in the range 0,"
F" {self.transformer.num_vector_embeds - 1} (inclusive)." )
lowercase :Any = latents.to(self.device )
# set timesteps
self.scheduler.set_timesteps(_lowerCAmelCase , device=self.device )
lowercase :Optional[int] = self.scheduler.timesteps.to(self.device )
lowercase :Union[str, Any] = latents
for i, t in enumerate(self.progress_bar(_lowerCAmelCase ) ):
# expand the sample if we are doing classifier free guidance
lowercase :Union[str, Any] = torch.cat([sample] * 2 ) if do_classifier_free_guidance else sample
# predict the un-noised image
# model_output == `log_p_x_0`
lowercase :Union[str, Any] = self.transformer(_lowerCAmelCase , encoder_hidden_states=_lowerCAmelCase , timestep=_lowerCAmelCase ).sample
if do_classifier_free_guidance:
lowercase :Optional[int] = model_output.chunk(2 )
lowercase :Tuple = model_output_uncond + guidance_scale * (model_output_text - model_output_uncond)
model_output -= torch.logsumexp(_lowerCAmelCase , dim=1 , keepdim=_lowerCAmelCase )
lowercase :Tuple = self.truncate(_lowerCAmelCase , _lowerCAmelCase )
# remove `log(0)`'s (`-inf`s)
lowercase :Tuple = model_output.clamp(-70 )
# compute the previous noisy sample x_t -> x_t-1
lowercase :int = self.scheduler.step(_lowerCAmelCase , timestep=_lowerCAmelCase , sample=_lowerCAmelCase , generator=_lowerCAmelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
lowercase :List[str] = self.vqvae.config.vq_embed_dim
lowercase :str = (batch_size, self.transformer.height, self.transformer.width, embedding_channels)
lowercase :Tuple = self.vqvae.quantize.get_codebook_entry(_lowerCAmelCase , shape=_lowerCAmelCase )
lowercase :Optional[Any] = self.vqvae.decode(_lowerCAmelCase , force_not_quantize=_lowerCAmelCase ).sample
lowercase :int = (image / 2 + 0.5).clamp(0 , 1 )
lowercase :Optional[int] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowercase :Optional[Any] = self.numpy_to_pil(_lowerCAmelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCAmelCase )
def SCREAMING_SNAKE_CASE ( self: Any , _lowerCAmelCase: torch.FloatTensor , _lowerCAmelCase: float ):
lowercase :Optional[int] = torch.sort(_lowerCAmelCase , 1 , descending=_lowerCAmelCase )
lowercase :List[Any] = torch.exp(_lowerCAmelCase )
lowercase :List[Any] = sorted_p_x_0.cumsum(dim=1 ) < truncation_rate
# Ensure that at least the largest probability is not zeroed out
lowercase :Optional[Any] = torch.full_like(keep_mask[:, 0:1, :] , _lowerCAmelCase )
lowercase :Union[str, Any] = torch.cat((all_true, keep_mask) , dim=1 )
lowercase :Optional[int] = keep_mask[:, :-1, :]
lowercase :Any = keep_mask.gather(1 , indices.argsort(1 ) )
lowercase :List[str] = log_p_x_0.clone()
lowercase :Optional[Any] = -torch.inf # -inf = log(0)
return rv
| 366 |
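The guidance step in `__call__` above mixes *log*-probabilities, so instead of re-applying a softmax it subtracts a `logsumexp` to renormalise each pixel's distribution. A tiny numeric sketch of just that step, with made-up shapes and values:

```python
# Hedged sketch of classifier-free guidance in log space.
import torch

guidance_scale = 5.0
log_p_uncond = torch.log_softmax(torch.randn(1, 8, 4), dim=1)  # (batch, vocab, pixels)
log_p_text = torch.log_softmax(torch.randn(1, 8, 4), dim=1)

mixed = log_p_uncond + guidance_scale * (log_p_text - log_p_uncond)
mixed = mixed - torch.logsumexp(mixed, dim=1, keepdim=True)    # renormalise

print(mixed.exp().sum(dim=1))  # each pixel sums to ~1 again
```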
from math import factorial
class __lowerCAmelCase :
def __init__( self: Optional[int] , _lowerCAmelCase: List[str] , _lowerCAmelCase: Tuple ):
lowercase :str = real
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Tuple = [1] * rank
else:
lowercase :List[str] = rank
def __repr__( self: Optional[Any] ):
return (
F"{self.real}+"
F"{'+'.join(str(_lowerCAmelCase )+'E'+str(n+1 )for n,dual in enumerate(self.duals ) )}"
)
def SCREAMING_SNAKE_CASE ( self: List[str] ):
lowercase :int = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , _lowerCAmelCase )
def __add__( self: int , _lowerCAmelCase: Tuple ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
return Dual(self.real + other , self.duals )
lowercase :Dict = self.duals.copy()
lowercase :List[str] = other.duals.copy()
if len(_lowerCAmelCase ) > len(_lowerCAmelCase ):
o_dual.extend([1] * (len(_lowerCAmelCase ) - len(_lowerCAmelCase )) )
elif len(_lowerCAmelCase ) < len(_lowerCAmelCase ):
s_dual.extend([1] * (len(_lowerCAmelCase ) - len(_lowerCAmelCase )) )
lowercase :Optional[Any] = []
for i in range(len(_lowerCAmelCase ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , _lowerCAmelCase )
_a = __add__
def __sub__( self: Optional[Any] , _lowerCAmelCase: Optional[int] ):
return self + other * -1
def __mul__( self: Optional[Any] , _lowerCAmelCase: Optional[int] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :str = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , _lowerCAmelCase )
lowercase :Dict = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , _lowerCAmelCase )
_a = __mul__
def __truediv__( self: List[str] , _lowerCAmelCase: Optional[int] ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :Union[str, Any] = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , _lowerCAmelCase )
raise ValueError
def __floordiv__( self: int , _lowerCAmelCase: Any ):
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
lowercase :str = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , _lowerCAmelCase )
raise ValueError
def __pow__( self: Any , _lowerCAmelCase: Tuple ):
if n < 0 or isinstance(_lowerCAmelCase , _lowerCAmelCase ):
raise ValueError("power must be a positive integer" )
if n == 0:
return 1
if n == 1:
return self
lowercase :Optional[int] = self
for _ in range(n - 1 ):
x *= self
return x
def UpperCAmelCase__ ( lowerCamelCase, lowerCamelCase, lowerCamelCase ):
if not callable(lowerCamelCase ):
raise ValueError("differentiate() requires a function as input for func" )
if not isinstance(lowerCamelCase, (float, int) ):
raise ValueError("differentiate() requires a float as input for position" )
if not isinstance(lowerCamelCase, lowerCamelCase ):
raise ValueError("differentiate() requires an int as input for order" )
lowercase :str = Dual(lowerCamelCase, 1 )
lowercase :Dict = func(lowerCamelCase )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
def UpperCAmelCase__ ( lowerCamelCase ):
return y**2 * y**4
print(differentiate(f, 9, 2))
| 158 | 0 |
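The `Dual` class above implements forward-mode automatic differentiation: arithmetic on (value, derivative-coefficient) pairs propagates exact derivatives, and `differentiate` reads the order-k coefficient back out. A minimal first-order sketch of the same idea with readable names (the original tracks higher orders via its `duals` list); the class name and test function here are illustrative.

```python
# Hedged sketch: first-order dual numbers only.
class D:
    """A (value, derivative) pair."""
    def __init__(self, v, d=0.0):
        self.v, self.d = v, d

    def _lift(self, o):
        return o if isinstance(o, D) else D(o)

    def __add__(self, o):
        o = self._lift(o)
        return D(self.v + o.v, self.d + o.d)

    def __mul__(self, o):
        o = self._lift(o)
        # product rule: (uv)' = u'v + uv'
        return D(self.v * o.v, self.d * o.v + self.v * o.d)

    __radd__, __rmul__ = __add__, __mul__

def derivative(f, x):
    return f(D(x, 1.0)).d   # seed dx/dx = 1

print(derivative(lambda x: x * x * x, 2.0))   # d/dx x^3 at x=2 -> 12.0
```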
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
snake_case : Any = {
'''configuration_lilt''': ['''LILT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''LiltConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
snake_case : List[Any] = [
'''LILT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LiltForQuestionAnswering''',
'''LiltForSequenceClassification''',
'''LiltForTokenClassification''',
'''LiltModel''',
'''LiltPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
snake_case : Optional[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 240 |
from argparse import ArgumentParser
from .env import EnvironmentCommand
def __lowercase ( ):
a__ = ArgumentParser('Diffusers CLI tool' , usage='diffusers-cli <command> [<args>]' )
a__ = parser.add_subparsers(help='diffusers-cli command helpers' )
# Register commands
EnvironmentCommand.register_subcommand(__lowerCAmelCase )
# Let's go
a__ = parser.parse_args()
if not hasattr(__lowerCAmelCase , 'func' ):
parser.print_help()
exit(1 )
# Run
a__ = args.func(__lowerCAmelCase )
service.run()
if __name__ == "__main__":
main()
| 240 | 1 |
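The CLI above follows a register-subcommand pattern: each command class adds its own subparser and stores a factory via `set_defaults(func=...)`, and `main` dispatches through `args.func`. A self-contained toy version of the same pattern; the `greet` command and its flag are invented for illustration.

```python
# Hedged sketch of the subcommand-registration pattern.
from argparse import ArgumentParser

class Greet:
    @staticmethod
    def register_subcommand(subparsers):
        p = subparsers.add_parser("greet", help="print a greeting")
        p.add_argument("--name", default="world")
        p.set_defaults(func=lambda args: Greet(args.name))

    def __init__(self, name):
        self.name = name

    def run(self):
        print(f"hello, {self.name}")

parser = ArgumentParser("demo-cli", usage="demo-cli <command> [<args>]")
subparsers = parser.add_subparsers(help="demo-cli command helpers")
Greet.register_subcommand(subparsers)

args = parser.parse_args(["greet", "--name", "diffusers"])
service = args.func(args)   # the stored factory builds the command object
service.run()               # -> "hello, diffusers"
```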
'''simple docstring'''
def snake_case_ (UpperCamelCase : int = 5000_0000 ):
'''simple docstring'''
_a = set()
_a = int((limit - 24) ** (1 / 2) )
_a = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , _a ) ) )
for primea in primes:
_a = primea * primea
for primea in primes:
_a = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
_a = primea * primea * primea * primea
_a = square + cube + tetr
if total >= limit:
break
ret.add(_a )
return len(_a )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 364 |
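The cell above is Project Euler 87: count the numbers below a limit expressible as a prime square plus a prime cube plus a prime fourth power. A brute-force spot check at a tiny limit; below 50 the four such numbers are 28, 33, 47 and 49, matching the problem statement.

```python
# Hedged spot check with a tiny limit; the prime list suffices for limit = 50.
def brute_force(limit: int) -> int:
    primes = [2, 3, 5]
    hits = {p * p + q**3 + r**4
            for p in primes for q in primes for r in primes
            if p * p + q**3 + r**4 < limit}
    return len(hits)

print(brute_force(50))  # 4
```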
'''simple docstring'''
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def snake_case_ (UpperCamelCase : Namespace ):
'''simple docstring'''
return ConvertCommand(
args.model_type , args.tf_checkpoint , args.pytorch_dump_output , args.config , args.finetuning_task_name )
_snake_case : List[Any] = '\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n'
class A ( _a ):
@staticmethod
def __lowerCAmelCase ( lowerCAmelCase_ : ArgumentParser ) -> Any:
"""simple docstring"""
_a = parser.add_parser(
'''convert''' , help='''CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.''' , )
train_parser.add_argument('''--model_type''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Model\'s type.''' )
train_parser.add_argument(
'''--tf_checkpoint''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''TensorFlow checkpoint path or folder.''' )
train_parser.add_argument(
'''--pytorch_dump_output''' , type=lowerCAmelCase_ , required=lowerCAmelCase_ , help='''Path to the PyTorch saved model output.''' )
train_parser.add_argument('''--config''' , type=lowerCAmelCase_ , default='''''' , help='''Configuration file path or folder.''' )
train_parser.add_argument(
'''--finetuning_task_name''' , type=lowerCAmelCase_ , default=lowerCAmelCase_ , help='''Optional fine-tuning task name if the TF model was a finetuned model.''' , )
train_parser.set_defaults(func=lowerCAmelCase_ )
def __init__( self : int , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , lowerCAmelCase_ : str , *lowerCAmelCase_ : Optional[Any] , ) -> List[str]:
"""simple docstring"""
_a = logging.get_logger('''transformers-cli/converting''' )
self._logger.info(F'Loading model {model_type}' )
_a = model_type
_a = tf_checkpoint
_a = pytorch_dump_output
_a = config
_a = finetuning_task_name
def __lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
if self._model_type == "albert":
try:
from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "bert":
try:
from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "funnel":
try:
from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
convert_tf_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "t5":
try:
from ..models.ta.convert_ta_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "gpt":
from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
convert_openai_checkpoint_to_pytorch,
)
convert_openai_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "transfo_xl":
try:
from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
convert_transfo_xl_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
if "ckpt" in self._tf_checkpoint.lower():
_a = self._tf_checkpoint
_a = ''''''
else:
_a = self._tf_checkpoint
_a = ''''''
convert_transfo_xl_checkpoint_to_pytorch(
lowerCAmelCase_ , self._config , self._pytorch_dump_output , lowerCAmelCase_ )
elif self._model_type == "gpt2":
try:
from ..models.gpta.convert_gpta_original_tf_checkpoint_to_pytorch import (
convert_gpta_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_gpta_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
elif self._model_type == "xlnet":
try:
from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
convert_xlnet_checkpoint_to_pytorch,
)
except ImportError:
raise ImportError(lowerCAmelCase_ )
convert_xlnet_checkpoint_to_pytorch(
self._tf_checkpoint , self._config , self._pytorch_dump_output , self._finetuning_task_name )
elif self._model_type == "xlm":
from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
convert_xlm_checkpoint_to_pytorch,
)
convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "lxmert":
from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
convert_lxmert_checkpoint_to_pytorch,
)
convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint , self._pytorch_dump_output )
elif self._model_type == "rembert":
from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
convert_rembert_tf_checkpoint_to_pytorch,
)
convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint , self._config , self._pytorch_dump_output )
else:
raise ValueError(
                '''--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5, transfo_xl, xlm, xlnet]''' )
| 179 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, mobilebert_config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = MobileBertConfig.from_json_file(mobilebert_config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = MobileBertForPreTraining(config)
    # Load weights from tf checkpoint
    model = load_tf_weights_in_mobilebert(model, config, tf_checkpoint_path)
    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
    args = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
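# Example invocation (illustrative; the file paths are placeholders):
#
#   python convert_mobilebert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./mobilebert/mobilebert_variables.ckpt \
#       --mobilebert_config_file ./mobilebert/config.json \
#       --pytorch_dump_path ./mobilebert/pytorch_model.bin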
| 148 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of finished process or last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
class MLFQ:
    """Multi-Level Feedback Queue: round robin on the upper queues, FCFS on the last one."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slice of queues that round robin algorithm applied
        self.time_slices = time_slices
        # unfinished process is in this ready_queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished process is in this sequence queue
        self.finish_queue: deque[Process] = deque()
    def calculate_sequence_of_finish_queue(self) -> list[str]:
        """Return the names of the finished processes, in completion order."""
        sequence = []
        for i in range(len(self.finish_queue)):
            sequence.append(self.finish_queue[i].process_name)
        return sequence
    def calculate_waiting_time(self, queue: list[Process]) -> list[int]:
        """Return the total ready-queue waiting time of each process."""
        waiting_times = []
        for i in range(len(queue)):
            waiting_times.append(queue[i].waiting_time)
        return waiting_times
    def calculate_turnaround_time(self, queue: list[Process]) -> list[int]:
        """Return the turnaround time (arrival to completion) of each process."""
        turnaround_times = []
        for i in range(len(queue)):
            turnaround_times.append(queue[i].turnaround_time)
        return turnaround_times
    def calculate_completion_time(self, queue: list[Process]) -> list[int]:
        """Return the completion (stop) time of each process."""
        completion_times = []
        for i in range(len(queue)):
            completion_times.append(queue[i].stop_time)
        return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Return the remaining burst time of each process in the queue."""
        return [q.burst_time for q in queue]
    def update_waiting_time(self, process: Process) -> int:
        """Add the time the process has spent waiting since it was last stopped."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        """Run every remaining process to completion, in arrival order."""
        finished: deque[Process] = deque()  # sequence deque of finished processes
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than current time, jump to its arrival
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst time to 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to the finished queue
            finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        """Give each process one time slice; unfinished processes return to the queue."""
        finished: deque[Process] = deque()  # sequence deque of terminated processes
        # just for 1 cycle; unfinished processes will go back to the queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process
            # if the process's arrival time is later than current time, jump to its arrival
            if self.current_time < cp.arrival_time:
                self.current_time = cp.arrival_time
            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of the process is bigger than the time slice
            if cp.burst_time > time_slice:
                # use the CPU for only one time slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # place the process at the back of the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use the CPU for the remaining burst time
                self.current_time += cp.burst_time
                # set burst time to 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process's turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to the finished queue
                finished.append(cp)
        self.finish_queue.extend(finished)  # add finished processes to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        """Apply round robin to all queues but the last, then FCFS on the last one."""
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has the first-come, first-served algorithm
        self.first_come_first_served(self.ready_queue)
        return self.finish_queue
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()
    # print total waiting times of processes (P1, P2, P3, P4)
    print(
        f"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print completion times of processes (P1, P2, P3, P4)
    print(
        f"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(
        f"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}"
    )
    # print sequence of finished processes
    print(
        f"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
    )
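# A small usage sketch added for illustration (the job names and slice values
# are assumptions, not part of the original module): a short job finishes
# within the first round-robin pass while a long one falls through to FCFS.
def _demo_two_level_mlfq() -> list[str]:
    jobs = deque([Process("short", 0, 5), Process("long", 0, 40)])
    scheduler = MLFQ(number_of_queues=2, time_slices=[10], queue=jobs, current_time=0)
    scheduler.multi_level_feedback_queue()
    return scheduler.calculate_sequence_of_finish_queue()  # -> ['short', 'long']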
| 148 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
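# Minimal usage sketch (illustrative; assumes a local pyspark installation):
# iterate a small DataFrame the same way the tests above do.
def _demo_iterate_spark_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
    df = spark.range(4).repartition(1)
    for row_id, row in SparkExamplesIterable(df):
        print(row_id, row)  # e.g. "0_0 {'id': 0}"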
| 14 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, PNDMScheduler, StableDiffusionInpaintPipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu, slow
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusion2InpaintPipelineFastTests(
    PipelineLatentTesterMixin, PipelineKarrasSchedulerTesterMixin, PipelineTesterMixin, unittest.TestCase
):
    pipeline_class = StableDiffusionInpaintPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    image_params = frozenset(
        []
    )  # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
    image_latents_params = frozenset([])
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=9, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), cross_attention_dim=32, attention_head_dim=(2, 4), use_linear_projection=True,
        )
        scheduler = PNDMScheduler(skip_prk_steps=True)
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "safety_checker": None,
            "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image.cpu().permute(0, 2, 3, 1)[0]
        init_image = Image.fromarray(np.uint8(image)).convert("RGB").resize((64, 64))
        mask_image = Image.fromarray(np.uint8(image + 4)).convert("RGB").resize((64, 64))
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": init_image,
            "mask_image": mask_image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "numpy",
        }
        return inputs
    def test_stable_diffusion_inpaint(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionInpaintPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.4727, 0.5735, 0.3941, 0.5446, 0.5926, 0.4394, 0.5062, 0.4654, 0.4476])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)
@slow
@require_torch_gpu
class StableDiffusion2InpaintPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_stable_diffusion_inpaint_pipeline(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(model_id, safety_checker=None)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 9e-3
    def test_stable_diffusion_inpaint_pipeline_fp16(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint"
            "/yellow_cat_sitting_on_a_park_bench_fp16.npy"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, safety_checker=None,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        output = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, output_type="np",
        )
        image = output.images[0]
        assert image.shape == (512, 512, 3)
        assert np.abs(expected_image - image).max() < 5e-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
        torch.cuda.empty_cache()
        torch.cuda.reset_max_memory_allocated()
        torch.cuda.reset_peak_memory_stats()
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/sd2-inpaint/init_image.png"
        )
        mask_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-inpaint/mask.png"
        )
        model_id = "stabilityai/stable-diffusion-2-inpainting"
        pndm = PNDMScheduler.from_pretrained(model_id, subfolder="scheduler")
        pipe = StableDiffusionInpaintPipeline.from_pretrained(
            model_id, safety_checker=None, scheduler=pndm, torch_dtype=torch.float16,
        )
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        pipe.enable_attention_slicing(1)
        pipe.enable_sequential_cpu_offload()
        prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
        generator = torch.manual_seed(0)
        _ = pipe(
            prompt=prompt, image=init_image, mask_image=mask_image, generator=generator, num_inference_steps=2, output_type="np",
        )
        mem_bytes = torch.cuda.max_memory_allocated()
        # make sure that less than 2.65 GB is allocated
        assert mem_bytes < 2.65 * 10**9
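# Minimal end-user sketch (illustrative; the image paths are placeholders): the
# pipeline exercised above can be driven directly for inpainting.
def _demo_inpaint(init_path="init.png", mask_path="mask.png"):
    pipe = StableDiffusionInpaintPipeline.from_pretrained(
        "stabilityai/stable-diffusion-2-inpainting", torch_dtype=torch.float16
    ).to("cuda")
    init_image = Image.open(init_path).convert("RGB").resize((512, 512))
    mask_image = Image.open(mask_path).convert("RGB").resize((512, 512))
    result = pipe(prompt="Face of a yellow cat, high resolution", image=init_image, mask_image=mask_image)
    result.images[0].save("inpainted.png")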
| 14 | 1 |
"""simple docstring"""
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
    fast27_timesteps,
    smart27_timesteps,
    smart50_timesteps,
    smart100_timesteps,
    smart185_timesteps,
    super27_timesteps,
    super40_timesteps,
    super100_timesteps,
)
@dataclass
class IFPipelineOutput(BaseOutput):
    """Output of the DeepFloyd IF pipelines: generated images plus per-image
    flags from the safety checker and the watermark detector."""

    images: Union[List[PIL.Image.Image], np.ndarray]
    nsfw_detected: Optional[List[bool]]
    watermark_detected: Optional[List[bool]]
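# Illustrative stage-1 usage sketch (assumption: access to the gated DeepFloyd
# weights on the Hub; this is not part of the module's public surface):
#
#   from diffusers import IFPipeline
#   pipe = IFPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", variant="fp16", torch_dtype=torch.float16)
#   out = pipe("a photo of a kangaroo wearing an orange hoodie")
#   image, nsfw, watermark = out.images[0], out.nsfw_detected, out.watermark_detected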
try:
    if not (is_transformers_available() and is_torch_available()):
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    from ...utils.dummy_torch_and_transformers_objects import *  # noqa F403
else:
    from .pipeline_if import IFPipeline
    from .pipeline_if_img2img import IFImg2ImgPipeline
    from .pipeline_if_img2img_superresolution import IFImg2ImgSuperResolutionPipeline
    from .pipeline_if_inpainting import IFInpaintingPipeline
    from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
    from .pipeline_if_superresolution import IFSuperResolutionPipeline
    from .safety_checker import IFSafetyChecker
    from .watermark import IFWatermarker
| 96 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

PIX2STRUCT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/pix2struct-textcaps-base": (
        "https://huggingface.co/google/pix2struct-textcaps-base/resolve/main/config.json"
    ),
}
class Pix2StructTextConfig(PretrainedConfig):
    model_type = "pix2struct_text_model"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {
        "hidden_size": "hidden_size",
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(
        self,
        vocab_size=50244,
        hidden_size=768,
        d_kv=64,
        d_ff=2048,
        num_layers=12,
        num_heads=12,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        dense_act_fn="gelu_new",
        decoder_start_token_id=0,
        use_cache=False,
        pad_token_id=0,
        eos_token_id=1,
        tie_word_embeddings=False,
        is_decoder=True,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.use_cache = use_cache
        self.eos_token_id = eos_token_id
        self.decoder_start_token_id = decoder_start_token_id
        # for backwards compatibility
        self.dense_act_fn = dense_act_fn
        super().__init__(
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            tie_word_embeddings=tie_word_embeddings,
            is_decoder=is_decoder,
            **kwargs,
        )

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the text config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["text_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructVisionConfig(PretrainedConfig):
    model_type = "pix2struct_vision_model"

    def __init__(
        self,
        hidden_size=768,
        patch_embed_hidden_size=768,
        d_ff=2048,
        d_kv=64,
        num_hidden_layers=12,
        num_attention_heads=12,
        dense_act_fn="gelu_new",
        layer_norm_eps=1e-6,
        dropout_rate=0.0,
        attention_dropout=0.0,
        initializer_range=1e-10,
        initializer_factor=1.0,
        seq_len=4096,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.patch_embed_hidden_size = patch_embed_hidden_size
        self.d_ff = d_ff
        self.dropout_rate = dropout_rate
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.dense_act_fn = dense_act_fn
        self.seq_len = seq_len
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.d_kv = d_kv

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs):
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from Pix2StructConfig
        if config_dict.get("model_type") == "pix2struct":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class Pix2StructConfig(PretrainedConfig):
    model_type = "pix2struct"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        initializer_factor=1.0,
        initializer_range=0.02,
        is_vqa=False,
        tie_word_embeddings=False,
        is_encoder_decoder=True,
        **kwargs,
    ):
        super().__init__(tie_word_embeddings=tie_word_embeddings, is_encoder_decoder=is_encoder_decoder, **kwargs)
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the Pix2StructTextConfig with default values.")
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. Initializing the Pix2StructVisionConfig with default values.")
        self.text_config = Pix2StructTextConfig(**text_config)
        self.vision_config = Pix2StructVisionConfig(**vision_config)
        self.decoder_start_token_id = self.text_config.decoder_start_token_id
        self.pad_token_id = self.text_config.pad_token_id
        self.eos_token_id = self.text_config.eos_token_id
        self.initializer_factor = initializer_factor
        self.initializer_range = initializer_range
        self.text_config.initializer_range = self.initializer_range
        self.vision_config.initializer_range = self.initializer_range
        self.is_vqa = is_vqa

    @classmethod
    def from_text_vision_configs(
        cls, text_config: Pix2StructTextConfig, vision_config: Pix2StructVisionConfig, **kwargs
    ):
        return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
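# Minimal composition sketch (illustrative; the tiny sizes are assumptions for
# demonstration, not any released checkpoint):
#
#   text_cfg = Pix2StructTextConfig(hidden_size=64, num_layers=2, num_heads=2, d_ff=128)
#   vision_cfg = Pix2StructVisionConfig(hidden_size=64, num_hidden_layers=2, num_attention_heads=2, d_ff=128)
#   config = Pix2StructConfig.from_text_vision_configs(text_cfg, vision_cfg)
#   config.to_dict()["text_config"]["hidden_size"]  # -> 64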
| 144 | 0 |
"""simple docstring"""
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)
def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    "Generates a tuple of dummy DataLoaders to test with"

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)
def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    "Trains for `num_epochs`, returning the per-epoch random draws for reproducibility checks"
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands
class DummyModel(nn.Module):
    "Simple model to do y = a * x + b"

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b
class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)
    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)
            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)
    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)
    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())
            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())
    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 11 states; only the last `total_limit` should survive:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))
    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
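# The pattern exercised above, reduced to its core (illustrative; the directory
# name is an assumption):
#
#   project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
#   accelerator = Accelerator(project_dir="runs", project_config=project_config)
#   model, optimizer = accelerator.prepare(model, optimizer)
#   accelerator.save_state()                                 # -> runs/checkpoints/checkpoint_0
#   accelerator.load_state("runs/checkpoints/checkpoint_0")  # restores model/optimizer/RNG state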
| 57 |
"""simple docstring"""
INSTALL_CONTENT = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""

notebook_first_cells = [{"type": "code", "content": INSTALL_CONTENT}]
black_avoid_patterns = {
    "{processor_class}": "FakeProcessorClass",
    "{model_class}": "FakeModelClass",
    "{object_class}": "FakeObjectClass",
}
| 57 | 1 |
"""simple docstring"""
from math import factorial
def solution(n: int = 20) -> int:
    """Count the lattice paths through an n x n grid: the central binomial
    coefficient C(2n, n)."""
    n = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1,
    # 2, 3,...
    k = n // 2
    return int(factorial(n) / (factorial(k) * factorial(n - k)))
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
            n = int(sys.argv[1])
print(solution(n))
except ValueError:
print('Invalid entry - please enter a number.')
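# Worked check (added for illustration): the default n = 20 gives the number
# of routes through a 20x20 grid, C(40, 20) = 137846528820 (Project Euler 15).
#
#   >>> solution(20)
#   137846528820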
| 266 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        apply_ocr=True,
    ):
        size = size if size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr

    def prepare_image_processor_dict(self):
        return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None

    def setUp(self):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "apply_ocr"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 18, "width": 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {"height": 42, "width": 42})

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoding = image_processing(image_inputs[0], return_tensors="pt")
        self.assertEqual(
            encoding.pixel_values.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
        self.assertIsInstance(encoding.words, list)
        self.assertIsInstance(encoding.boxes, list)

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ),
        )
    def test_layoutlmv3_integration_test(self):
        # with apply_OCR = True
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset

        ds = load_dataset("hf-internal-testing/fixtures_docvqa", split="test")
        image = Image.open(ds[0]["file"]).convert("RGB")
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
        self.assertEqual(len(encoding.words), len(encoding.boxes))
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
        expected_words = [['''11:14''', '''to''', '''11:39''', '''a.m''', '''11:39''', '''to''', '''11:44''', '''a.m.''', '''11:44''', '''a.m.''', '''to''', '''12:25''', '''p.m.''', '''12:25''', '''to''', '''12:58''', '''p.m.''', '''12:58''', '''to''', '''4:00''', '''p.m.''', '''2:00''', '''to''', '''5:00''', '''p.m.''', '''Coffee''', '''Break''', '''Coffee''', '''will''', '''be''', '''served''', '''for''', '''men''', '''and''', '''women''', '''in''', '''the''', '''lobby''', '''adjacent''', '''to''', '''exhibit''', '''area.''', '''Please''', '''move''', '''into''', '''exhibit''', '''area.''', '''(Exhibits''', '''Open)''', '''TRRF''', '''GENERAL''', '''SESSION''', '''(PART''', '''|)''', '''Presiding:''', '''Lee''', '''A.''', '''Waller''', '''TRRF''', '''Vice''', '''President''', '''“Introductory''', '''Remarks”''', '''Lee''', '''A.''', '''Waller,''', '''TRRF''', '''Vice''', '''Presi-''', '''dent''', '''Individual''', '''Interviews''', '''with''', '''TRRF''', '''Public''', '''Board''', '''Members''', '''and''', '''Sci-''', '''entific''', '''Advisory''', '''Council''', '''Mem-''', '''bers''', '''Conducted''', '''by''', '''TRRF''', '''Treasurer''', '''Philip''', '''G.''', '''Kuehn''', '''to''', '''get''', '''answers''', '''which''', '''the''', '''public''', '''refrigerated''', '''warehousing''', '''industry''', '''is''', '''looking''', '''for.''', '''Plus''', '''questions''', '''from''', '''the''', '''floor.''', '''Dr.''', '''Emil''', '''M.''', '''Mrak,''', '''University''', '''of''', '''Cal-''', '''ifornia,''', '''Chairman,''', '''TRRF''', '''Board;''', '''Sam''', '''R.''', '''Cecil,''', '''University''', '''of''', '''Georgia''', '''College''', '''of''', '''Agriculture;''', '''Dr.''', '''Stanley''', '''Charm,''', '''Tufts''', '''University''', '''School''', '''of''', '''Medicine;''', '''Dr.''', '''Robert''', '''H.''', '''Cotton,''', '''ITT''', '''Continental''', '''Baking''', '''Company;''', '''Dr.''', '''Owen''', '''Fennema,''', '''University''', '''of''', '''Wis-''', '''consin;''', '''Dr.''', '''Robert''', '''E.''', '''Hardenburg,''', '''USDA.''', '''Questions''', '''and''', '''Answers''', '''Exhibits''', '''Open''', '''Capt.''', '''Jack''', '''Stoney''', '''Room''', '''TRRF''', '''Scientific''', '''Advisory''', '''Council''', '''Meeting''', '''Ballroom''', '''Foyer''']] # noqa: E231
        expected_boxes = [[[1_41, 57, 2_14, 69], [2_28, 58, 2_52, 69], [1_41, 75, 2_16, 88], [2_30, 79, 2_80, 88], [1_42, 2_60, 2_18, 2_73], [2_30, 2_61, 2_55, 2_73], [1_43, 2_79, 2_18, 2_90], [2_31, 2_82, 2_90, 2_91], [1_43, 3_42, 2_18, 3_54], [2_31, 3_45, 2_89, 3_55], [2_02, 3_62, 2_27, 3_73], [1_43, 3_79, 2_20, 3_92], [2_31, 3_82, 2_91, 3_94], [1_44, 7_14, 2_20, 7_26], [2_31, 7_15, 2_56, 7_26], [1_44, 7_32, 2_20, 7_45], [2_32, 7_36, 2_91, 7_47], [1_44, 7_69, 2_18, 7_82], [2_31, 7_70, 2_56, 7_82], [1_41, 7_88, 2_02, 8_01], [2_15, 7_91, 2_74, 8_04], [1_43, 8_26, 2_04, 8_38], [2_15, 8_26, 2_40, 8_38], [1_42, 8_44, 2_02, 8_57], [2_15, 8_47, 2_74, 8_59], [3_34, 57, 4_27, 69], [4_40, 57, 5_22, 69], [3_69, 75, 4_61, 88], [4_69, 75, 5_16, 88], [5_28, 76, 5_62, 88], [5_70, 76, 6_67, 88], [6_75, 75, 7_11, 87], [7_21, 79, 7_78, 88], [7_89, 75, 8_40, 88], [3_69, 97, 4_70, 1_07], [4_84, 94, 5_07, 1_06], [5_18, 94, 5_62, 1_07], [5_76, 94, 6_55, 1_10], [6_68, 94, 7_92, 1_09], [8_04, 95, 8_29, 1_07], [3_69, 1_13, 4_65, 1_25], [4_77, 1_16, 5_47, 1_25], [5_62, 1_13, 6_58, 1_25], [6_71, 1_16, 7_48, 1_25], [7_61, 1_13, 8_11, 1_25], [3_69, 1_31, 4_65, 1_43], [4_77, 1_33, 5_48, 1_43], [5_63, 1_30, 6_98, 1_45], [7_10, 1_30, 8_02, 1_46], [3_36, 1_71, 4_12, 1_83], [4_23, 1_71, 5_72, 1_83], [5_82, 1_70, 7_16, 1_84], [7_28, 1_71, 8_17, 1_87], [8_29, 1_71, 8_44, 1_86], [3_38, 1_97, 4_82, 2_12], [5_07, 1_96, 5_57, 2_09], [5_69, 1_96, 5_95, 2_08], [6_10, 1_96, 7_02, 2_09], [5_05, 2_14, 5_83, 2_26], [5_95, 2_14, 6_56, 2_27], [6_70, 2_15, 8_07, 2_27], [3_35, 2_59, 5_43, 2_74], [5_56, 2_59, 7_08, 2_72], [3_72, 2_79, 4_22, 2_91], [4_35, 2_79, 4_60, 2_91], [4_74, 2_79, 5_74, 2_92], [5_87, 2_78, 6_64, 2_91], [6_76, 2_78, 7_38, 2_91], [7_51, 2_79, 8_34, 2_91], [3_72, 2_98, 4_34, 3_10], [3_35, 3_41, 4_83, 3_54], [4_97, 3_41, 6_55, 3_54], [6_67, 3_41, 7_28, 3_54], [7_40, 3_41, 8_25, 3_54], [3_35, 3_60, 4_30, 3_72], [4_42, 3_60, 5_34, 3_72], [5_45, 3_59, 6_87, 3_72], [6_97, 3_60, 7_54, 3_72], [7_65, 3_60, 8_23, 3_73], [3_34, 3_78, 4_28, 3_91], [4_40, 3_78, 5_77, 3_94], [5_90, 3_78, 7_05, 3_91], [7_20, 3_78, 8_01, 3_91], [3_34, 3_97, 4_00, 4_09], [3_70, 4_16, 5_29, 4_29], [5_44, 4_16, 5_76, 4_32], [5_87, 4_16, 6_65, 4_28], [6_77, 4_16, 8_14, 4_29], [3_72, 4_35, 4_52, 4_50], [4_65, 4_34, 4_95, 4_47], [5_11, 4_34, 6_00, 4_47], [6_11, 4_36, 6_37, 4_47], [6_49, 4_36, 6_94, 4_51], [7_05, 4_38, 8_24, 4_47], [3_69, 4_53, 4_52, 4_66], [4_64, 4_54, 5_09, 4_66], [5_22, 4_53, 6_11, 4_69], [6_25, 4_53, 7_92, 4_69], [3_70, 4_72, 5_56, 4_88], [5_70, 4_72, 6_84, 4_87], [6_97, 4_72, 7_18, 4_85], [7_32, 4_72, 8_35, 4_88], [3_69, 4_90, 4_11, 5_03], [4_25, 4_90, 4_84, 5_03], [4_96, 4_90, 6_35, 5_06], [6_45, 4_90, 7_07, 5_03], [7_18, 4_91, 7_61, 5_03], [7_71, 4_90, 8_40, 5_03], [3_36, 5_10, 3_74, 5_21], [3_88, 5_10, 4_47, 5_22], [4_60, 5_10, 4_89, 5_21], [5_03, 5_10, 5_80, 5_22], [5_92, 5_09, 7_36, 5_25], [7_45, 5_09, 7_70, 5_22], [7_81, 5_09, 8_40, 5_22], [3_38, 5_28, 4_34, 5_41], [4_48, 5_28, 5_96, 5_41], [6_09, 5_27, 6_87, 5_40], [7_00, 5_28, 7_92, 5_41], [3_36, 5_46, 3_97, 5_59], [4_07, 5_46, 4_31, 5_59], [4_43, 5_46, 5_25, 5_60], [5_37, 5_46, 6_80, 5_62], [6_88, 5_46, 7_14, 5_59], [7_22, 5_46, 8_37, 5_62], [3_36, 5_65, 4_49, 5_81], [4_61, 5_65, 4_85, 5_77], [4_97, 5_65, 6_65, 5_81], [6_81, 5_65, 7_18, 5_77], [7_32, 5_65, 8_37, 5_80], [3_37, 5_84, 4_38, 5_97], [4_52, 5_83, 5_21, 5_96], [5_35, 5_84, 6_77, 5_99], [6_90, 5_83, 7_87, 5_96], [8_01, 5_83, 8_25, 5_96], [3_38, 6_02, 4_78, 6_15], [4_92, 6_02, 5_30, 6_14], [5_43, 6_02, 6_38, 6_15], [6_50, 6_02, 6_76,
6_14], [6_88, 6_02, 7_88, 6_15], [8_02, 6_02, 8_43, 6_14], [3_37, 6_21, 5_02, 6_33], [5_16, 6_21, 6_15, 6_37], [6_29, 6_21, 7_74, 6_36], [7_89, 6_21, 8_27, 6_33], [3_37, 6_39, 4_18, 6_52], [4_32, 6_40, 5_71, 6_53], [5_87, 6_39, 7_31, 6_55], [7_43, 6_39, 7_69, 6_52], [7_80, 6_39, 8_41, 6_52], [3_38, 6_58, 4_40, 6_73], [4_55, 6_58, 4_91, 6_70], [5_08, 6_58, 6_02, 6_71], [6_16, 6_58, 6_38, 6_70], [6_54, 6_58, 8_35, 6_74], [3_37, 6_77, 4_29, 6_89], [3_37, 7_14, 4_82, 7_26], [4_95, 7_14, 5_48, 7_26], [5_61, 7_14, 6_83, 7_26], [3_38, 7_70, 4_61, 7_82], [4_74, 7_69, 5_54, 7_85], [4_89, 7_88, 5_62, 8_03], [5_76, 7_88, 6_43, 8_01], [6_56, 7_87, 7_51, 8_04], [7_64, 7_88, 8_44, 8_01], [3_34, 8_25, 4_21, 8_38], [4_30, 8_24, 5_74, 8_38], [5_84, 8_24, 7_23, 8_41], [3_35, 8_44, 4_50, 8_57], [4_64, 8_43, 5_83, 8_60], [6_28, 8_62, 7_55, 8_75], [7_69, 8_61, 8_48, 8_78]]] # noqa: E231
        # fmt: on
        self.assertListEqual(encoding.words, expected_words)
        self.assertListEqual(encoding.boxes, expected_boxes)

        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False)
        encoding = image_processing(image, return_tensors="pt")
        self.assertEqual(encoding.pixel_values.shape, (1, 3, 224, 224))
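# Minimal usage sketch (illustrative; requires pytesseract and a local image
# file, whose name here is a placeholder):
#
#   processor = LayoutLMv3ImageProcessor()  # apply_ocr=True by default
#   encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="pt")
#   encoding.pixel_values.shape                   # (1, 3, 224, 224)
#   encoding.words[0][:3], encoding.boxes[0][:3]  # first OCR'd words and their boxes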
| 266 | 1 |
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"add_prefix_space": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(" ") else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="max_length" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw,
    )
def trim_batch(input_ids, pad_token_id, attention_mask=None):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
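# Quick illustration (assuming pad id 0): trim_batch drops columns made up
# entirely of padding.
#
#   ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
#   trim_batch(ids, pad_token_id=0)  # -> tensor([[5, 6], [7, 0]])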
class Seq2SeqDataset(Dataset):
    """A dataset that reads line-aligned <type_path>.source / <type_path>.target files."""

    def __init__(
        self,
        tokenizer,
        data_dir,
        max_source_length,
        max_target_length,
        type_path="train",
        n_obs=None,
        src_lang=None,
        tgt_lang=None,
        prefix="",
    ):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
__UpperCAmelCase = getLogger(__name__)
def A__ ( __lowerCamelCase ):
return list(itertools.chain.from_iterable(__lowerCamelCase ) )
def A__ ( __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = get_git_info()
save_json(__lowerCamelCase, os.path.join(__lowerCamelCase, '''git_log.json''' ) )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase=4, **__lowerCamelCase ):
with open(__lowerCamelCase, '''w''' ) as f:
json.dump(__lowerCamelCase, __lowerCamelCase, indent=__lowerCamelCase, **__lowerCamelCase )
def A__ ( __lowerCamelCase ):
with open(__lowerCamelCase ) as f:
return json.load(__lowerCamelCase )
def A__ ( ):
SCREAMING_SNAKE_CASE_ = git.Repo(search_parent_directories=__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = {
'''repo_id''': str(__lowerCamelCase ),
'''repo_sha''': str(repo.head.object.hexsha ),
'''repo_branch''': str(repo.active_branch ),
'''hostname''': str(socket.gethostname() ),
}
return repo_infos
def A__ ( __lowerCamelCase, __lowerCamelCase ):
return list(map(__lowerCamelCase, __lowerCamelCase ) )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
with open(__lowerCamelCase, '''wb''' ) as f:
return pickle.dump(__lowerCamelCase, __lowerCamelCase )
def A__ ( __lowerCamelCase ):
def remove_articles(__lowerCamelCase ):
return re.sub(r'''\b(a|an|the)\b''', ''' ''', __lowerCamelCase )
def white_space_fix(__lowerCamelCase ):
return " ".join(text.split() )
def remove_punc(__lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = set(string.punctuation )
return "".join(ch for ch in text if ch not in exclude )
def lower(__lowerCamelCase ):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(__lowerCamelCase ) ) ) )
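# Worked, self-contained sketch (example string assumed) of the normalization
# above: lowercase -> strip punctuation -> drop articles -> collapse whitespace.
def _sketch_normalize_answer():
    text = "The Eiffel Tower!"
    text = text.lower()  # "the eiffel tower!"
    text = "".join(ch for ch in text if ch not in set(string.punctuation))
    text = re.sub(r"\b(a|an|the)\b", " ", text)  # " eiffel tower"
    return " ".join(text.split())  # "eiffel tower"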
def A__ ( __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = normalize_answer(__lowerCamelCase ).split()
SCREAMING_SNAKE_CASE_ = normalize_answer(__lowerCamelCase ).split()
SCREAMING_SNAKE_CASE_ = Counter(__lowerCamelCase ) & Counter(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = sum(common.values() )
if num_same == 0:
return 0
SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = 1.0 * num_same / len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = (2 * precision * recall) / (precision + recall)
return fa
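# Worked example (inputs assumed) of the token-level F1 computed above.
def _sketch_f1():
    pred, gold = ["eiffel", "tower"], ["eiffel", "tower", "paris"]
    num_same = sum((Counter(pred) & Counter(gold)).values())  # 2 shared tokens
    precision = num_same / len(pred)  # 1.0
    recall = num_same / len(gold)  # 0.666...
    return 2 * precision * recall / (precision + recall)  # 0.8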
def A__ ( __lowerCamelCase, __lowerCamelCase ):
return normalize_answer(__lowerCamelCase ) == normalize_answer(__lowerCamelCase )
def A__ ( __lowerCamelCase, __lowerCamelCase ):
assert len(__lowerCamelCase ) == len(__lowerCamelCase )
SCREAMING_SNAKE_CASE_ = 0
for hypo, pred in zip(__lowerCamelCase, __lowerCamelCase ):
em += exact_match_score(__lowerCamelCase, __lowerCamelCase )
if len(__lowerCamelCase ) > 0:
em /= len(__lowerCamelCase )
return {"em": em}
def A__ ( __lowerCamelCase ):
return model_prefix.startswith('''rag''' )
def A__ ( __lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
SCREAMING_SNAKE_CASE_ = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
SCREAMING_SNAKE_CASE_ = '''dropout_rate'''
for p in extra_params:
if getattr(__lowerCamelCase, __lowerCamelCase, __lowerCamelCase ):
if not hasattr(__lowerCamelCase, __lowerCamelCase ) and not hasattr(__lowerCamelCase, equivalent_param[p] ):
logger.info('''config doesn\'t have a `{}` attribute'''.format(__lowerCamelCase ) )
delattr(__lowerCamelCase, __lowerCamelCase )
continue
SCREAMING_SNAKE_CASE_ = p if hasattr(__lowerCamelCase, __lowerCamelCase ) else equivalent_param[p]
setattr(__lowerCamelCase, __lowerCamelCase, getattr(__lowerCamelCase, __lowerCamelCase ) )
delattr(__lowerCamelCase, __lowerCamelCase )
return hparams, config
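# Hedged sketch (stand-in objects assumed, not part of the original script) of
# the remapping above: an hparam named "dropout" is written onto a T5-style
# config under its equivalent name "dropout_rate".
def _sketch_param_remap():
    class _NS:  # minimal attribute bag for illustration
        pass
    hparams, config = _NS(), _NS()
    hparams.dropout = 0.1
    setattr(config, "dropout_rate", hparams.dropout)  # T5 configs use dropout_rate
    return config.dropout_rate  # 0.1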
| 257 |
def A__ ( __lowerCamelCase ):
return sum(i for i in range(1, number // 2 + 1 ) if number % i == 0 ) == number
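# Worked check: for 28, the candidate divisors run up to 28 // 2 = 14; the ones
# that divide evenly are 1, 2, 4, 7, 14, and 1 + 2 + 4 + 7 + 14 == 28, so 28 is
# reported as a perfect number.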
if __name__ == "__main__":
print("Program to check whether a number is a Perfect number or not...")
__UpperCAmelCase = int(input("Enter number: ").strip())
print(F"""{number} is {'' if perfect(number) else 'not '}a Perfect Number.""")
| 257 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class A__ :
"""simple docstring"""
def __init__( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Tuple=1_3 , lowerCAmelCase__ : Optional[Any]=3_2 , lowerCAmelCase__ : Dict=2 , lowerCAmelCase__ : Dict=3 , lowerCAmelCase__ : Union[str, Any]=1_6 , lowerCAmelCase__ : Any=[1, 2, 1] , lowerCAmelCase__ : int=[2, 2, 4] , lowerCAmelCase__ : int=2 , lowerCAmelCase__ : Optional[int]=2.0 , lowerCAmelCase__ : Optional[int]=True , lowerCAmelCase__ : Any=0.0 , lowerCAmelCase__ : str=0.0 , lowerCAmelCase__ : Optional[Any]=0.1 , lowerCAmelCase__ : Tuple="gelu" , lowerCAmelCase__ : Optional[Any]=False , lowerCAmelCase__ : Any=True , lowerCAmelCase__ : List[str]=0.02 , lowerCAmelCase__ : Union[str, Any]=1e-5 , lowerCAmelCase__ : str=True , lowerCAmelCase__ : List[str]=None , lowerCAmelCase__ : Union[str, Any]=True , lowerCAmelCase__ : Tuple=1_0 , lowerCAmelCase__ : Dict=8 , lowerCAmelCase__ : Tuple=["stage1", "stage2", "stage3"] , lowerCAmelCase__ : Tuple=[1, 2, 3] , ) -> Dict:
"""simple docstring"""
_UpperCAmelCase : List[str] = parent
_UpperCAmelCase : Tuple = batch_size
_UpperCAmelCase : Optional[int] = image_size
_UpperCAmelCase : str = patch_size
_UpperCAmelCase : Union[str, Any] = num_channels
_UpperCAmelCase : Optional[Any] = embed_dim
_UpperCAmelCase : str = depths
_UpperCAmelCase : Union[str, Any] = num_heads
_UpperCAmelCase : Optional[Any] = window_size
_UpperCAmelCase : Union[str, Any] = mlp_ratio
_UpperCAmelCase : Tuple = qkv_bias
_UpperCAmelCase : List[Any] = hidden_dropout_prob
_UpperCAmelCase : str = attention_probs_dropout_prob
_UpperCAmelCase : str = drop_path_rate
_UpperCAmelCase : int = hidden_act
_UpperCAmelCase : Optional[Any] = use_absolute_embeddings
_UpperCAmelCase : str = patch_norm
_UpperCAmelCase : Tuple = layer_norm_eps
_UpperCAmelCase : Tuple = initializer_range
_UpperCAmelCase : Optional[Any] = is_training
_UpperCAmelCase : List[Any] = scope
_UpperCAmelCase : List[str] = use_labels
_UpperCAmelCase : int = type_sequence_label_size
_UpperCAmelCase : Tuple = encoder_stride
_UpperCAmelCase : Optional[Any] = out_features
_UpperCAmelCase : Dict = out_indices
def _lowerCAmelCase ( self : str ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase : Optional[int] = None
if self.use_labels:
_UpperCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase : Union[str, Any] = self.get_config()
return config, pixel_values, labels
def _lowerCAmelCase ( self : Tuple ) -> str:
"""simple docstring"""
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , patch_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _lowerCAmelCase ( self : Optional[int] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : Optional[int] ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : Union[str, Any] = MaskFormerSwinModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(UpperCamelCase__ )
_UpperCAmelCase : List[Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase : str = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCAmelCase ( self : List[str] , lowerCAmelCase__ : Tuple , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : List[str] ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase : Any = MaskFormerSwinBackbone(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : List[Any] = model(UpperCamelCase__ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [1_3, 1_6, 1_6, 1_6] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [1_6, 3_2, 6_4] )
# verify ValueError
with self.parent.assertRaises(UpperCamelCase__ ):
_UpperCAmelCase : Optional[int] = ["stem"]
_UpperCAmelCase : Any = MaskFormerSwinBackbone(config=UpperCamelCase__ )
def _lowerCAmelCase ( self : Any ) -> Any:
"""simple docstring"""
_UpperCAmelCase : Tuple = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : Optional[int] = config_and_inputs
_UpperCAmelCase : Union[str, Any] = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class A__ ( _A , _A , unittest.TestCase ):
"""simple docstring"""
UpperCamelCase_ : Union[str, Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCamelCase_ : Dict = {'''feature-extraction''': MaskFormerSwinModel} if is_torch_available() else {}
UpperCamelCase_ : Dict = False
UpperCamelCase_ : Tuple = False
UpperCamelCase_ : Any = False
UpperCamelCase_ : int = False
UpperCamelCase_ : str = False
def _lowerCAmelCase ( self : Any ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase : List[str] = MaskFormerSwinModelTester(self )
_UpperCAmelCase : Optional[Any] = ConfigTester(self , config_class=UpperCamelCase__ , embed_dim=3_7 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"
" `nn.DataParallel`"
) )
def _lowerCAmelCase ( self : List[str] ) -> Optional[int]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : str ) -> Dict:
"""simple docstring"""
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCAmelCase ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
return
def _lowerCAmelCase ( self : str ) -> str:
"""simple docstring"""
_UpperCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _lowerCAmelCase ( self : int ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*UpperCamelCase__ )
@unittest.skip("Swin does not use inputs_embeds" )
def _lowerCAmelCase ( self : Any ) -> int:
"""simple docstring"""
pass
@unittest.skip("Swin does not support feedforward chunking" )
def _lowerCAmelCase ( self : str ) -> List[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Dict = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _lowerCAmelCase ( self : Tuple ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = model_class(UpperCamelCase__ )
_UpperCAmelCase : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase : str = [*signature.parameters.keys()]
_UpperCAmelCase : Optional[int] = ["pixel_values"]
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
@unittest.skip(reason="MaskFormerSwin is only used as backbone and doesn't support output_attentions" )
def _lowerCAmelCase ( self : Tuple ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="MaskFormerSwin is only used as an internal backbone" )
def _lowerCAmelCase ( self : List[str] ) -> Dict:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Union[str, Any] , lowerCAmelCase__ : List[Any] , lowerCAmelCase__ : Any , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : Union[str, Any] ) -> Any:
"""simple docstring"""
_UpperCAmelCase : List[str] = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase : Dict = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase : Optional[int] = outputs.hidden_states
_UpperCAmelCase : Any = getattr(
self.model_tester , "expected_num_hidden_layers" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# Swin has a different seq_length
_UpperCAmelCase : List[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCAmelCase ( self : Dict ) -> Dict:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
_UpperCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Optional[int] = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Optional[int] = 3
_UpperCAmelCase : List[Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase : Optional[Any] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase : Union[str, Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
_UpperCAmelCase : int = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase : Any = True
self.check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , (padded_height, padded_width) )
@unittest.skip(reason="MaskFormerSwin doesn't have pretrained checkpoints" )
def _lowerCAmelCase ( self : Optional[int] ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _lowerCAmelCase ( self : List[str] ) -> List[Any]:
"""simple docstring"""
pass
@unittest.skip(reason="This will be fixed once MaskFormerSwin is replaced by native Swin" )
def _lowerCAmelCase ( self : Dict ) -> Optional[Any]:
"""simple docstring"""
pass
def _lowerCAmelCase ( self : Dict ) -> Any:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCAmelCase__ : Union[str, Any] ):
_UpperCAmelCase : List[str] = 0
return t
def check_equivalence(lowerCAmelCase__ : str , lowerCAmelCase__ : Dict , lowerCAmelCase__ : Optional[Any] , lowerCAmelCase__ : int={} ):
with torch.no_grad():
_UpperCAmelCase : List[Any] = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = model(**UpperCamelCase__ , return_dict=UpperCamelCase__ , **UpperCamelCase__ ).to_tuple()
def recursive_check(lowerCAmelCase__ : Optional[int] , lowerCAmelCase__ : List[Any] ):
if isinstance(UpperCamelCase__ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(UpperCamelCase__ , UpperCamelCase__ ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif isinstance(UpperCamelCase__ , UpperCamelCase__ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(UpperCamelCase__ ) , set_nan_tensor_to_zero(UpperCamelCase__ ) , atol=1e-5 ) , msg=(
"Tuple and dict output are not equal. Difference:"
F""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
F""" {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}. Dict has"""
F""" `nan`: {torch.isnan(UpperCamelCase__ ).any()} and `inf`: {torch.isinf(UpperCamelCase__ )}."""
) , )
recursive_check(UpperCamelCase__ , UpperCamelCase__ )
for model_class in self.all_model_classes:
_UpperCAmelCase : Any = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Optional[Any] = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
_UpperCAmelCase : int = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
_UpperCAmelCase : Dict = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"output_hidden_states": True} )
_UpperCAmelCase : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
_UpperCAmelCase : str = self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
check_equivalence(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , {"output_hidden_states": True} )
@require_torch
class A__ ( unittest.TestCase , _A ):
"""simple docstring"""
UpperCamelCase_ : Any = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCamelCase_ : List[str] = MaskFormerSwinConfig
def _lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase : Any = MaskFormerSwinModelTester(self )
def _lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase , _UpperCAmelCase : Any = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase : Dict = inputs_dict["pixel_values"].shape[0]
for backbone_class in self.all_model_classes:
_UpperCAmelCase : Optional[Any] = backbone_class(UpperCamelCase__ )
backbone.to(UpperCamelCase__ )
backbone.eval()
_UpperCAmelCase : Dict = backbone(**UpperCamelCase__ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , UpperCamelCase__ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
_UpperCAmelCase : str = backbone(**UpperCamelCase__ , output_hidden_states=UpperCamelCase__ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase : List[Any] = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
_UpperCAmelCase : Optional[Any] = backbone(**UpperCamelCase__ , output_attentions=UpperCamelCase__ )
self.assertIsNotNone(outputs.attentions ) | 145 |
import argparse
from argparse import Namespace
import torch
from torch import nn
from transformers import XGLMConfig, XGLMForCausalLM
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = [
"""decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(A_, A_ )
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ , __magic_name__ = emb.weight.shape
__magic_name__ = nn.Linear(A_, A_, bias=A_ )
__magic_name__ = emb.weight.data
return lin_layer
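# Minimal sketch (sizes assumed) of the weight tying performed above: the output
# projection reuses the embedding matrix, so hidden states map to vocab logits.
def _sketch_tied_projection():
    emb = nn.Embedding(10, 4)  # vocab 10, hidden 4 (assumed sizes)
    lin = nn.Linear(4, 10, bias=False)  # weight shape (10, 4) matches emb.weight
    lin.weight.data = emb.weight.data  # both modules now point at the same tensor data
    return lin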
def a__ ( A_ ):
'''simple docstring'''
__magic_name__ = torch.load(A_, map_location="""cpu""" )
__magic_name__ = Namespace(**checkpoint["""cfg"""]["""model"""] )
__magic_name__ = checkpoint["""model"""]
remove_ignore_keys_(A_ )
__magic_name__ = state_dict["""decoder.embed_tokens.weight"""].shape[0]
__magic_name__ = {key.replace("""decoder""", """model""" ): val for key, val in state_dict.items()}
__magic_name__ = XGLMConfig(
vocab_size=A_, max_position_embeddings=args.max_target_positions, num_layers=args.decoder_layers, attention_heads=args.decoder_attention_heads, ffn_dim=args.decoder_ffn_embed_dim, d_model=args.decoder_embed_dim, layerdrop=args.decoder_layerdrop, dropout=args.dropout, attention_dropout=args.attention_dropout, activation_dropout=args.activation_dropout, activation_function="""gelu""", scale_embedding=not args.no_scale_embedding, tie_word_embeddings=args.share_decoder_input_output_embed, )
__magic_name__ = XGLMForCausalLM(A_ )
__magic_name__ = model.load_state_dict(A_, strict=A_ )
print(A_ )
__magic_name__ = make_linear_from_emb(model.model.embed_tokens )
return model
if __name__ == "__main__":
__lowerCAmelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument('fairseq_path', type=str, help='path to a model.pt on local filesystem.')
parser.add_argument('pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
__lowerCAmelCase : List[str] = parser.parse_args()
__lowerCAmelCase : str = convert_fairseq_xglm_checkpoint_from_disk(args.fairseq_path)
model.save_pretrained(args.pytorch_dump_folder_path)
| 88 | 0 |
'''simple docstring'''
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class snake_case__ ( _lowerCAmelCase ):
A__ = 42
@flax_register_to_config
class snake_case__ ( nn.Module , _lowerCAmelCase , _lowerCAmelCase ):
A__ = 32
A__ = 4
A__ = 4
A__ = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
A__ = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
A__ = False
A__ = (320, 640, 1_280, 1_280)
A__ = 2
A__ = 8
A__ = None
A__ = 1_280
A__ = 0.0
A__ = False
A__ = jnp.floataa
A__ = True
A__ = 0
A__ = False
def A_ ( self : List[str] , __a : jax.random.KeyArray ) -> Optional[Any]:
'''simple docstring'''
# init input tensors
__snake_case : Tuple = (1, self.in_channels, self.sample_size, self.sample_size)
__snake_case : Dict = jnp.zeros(_lowercase , dtype=jnp.floataa )
__snake_case : Any = jnp.ones((1,) , dtype=jnp.intaa )
__snake_case : List[Any] = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__snake_case , __snake_case : Tuple = jax.random.split(_lowercase )
__snake_case : str = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_lowercase , _lowercase , _lowercase , _lowercase )["params"]
def A_ ( self : Any ) -> Optional[Any]:
'''simple docstring'''
__snake_case : List[Any] = self.block_out_channels
__snake_case : Any = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
'At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.' )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__snake_case : Optional[Any] = self.num_attention_heads or self.attention_head_dim
# input
__snake_case : int = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__snake_case : Any = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__snake_case : List[Any] = FlaxTimestepEmbedding(_lowercase , dtype=self.dtype )
__snake_case : Optional[int] = self.only_cross_attention
if isinstance(_lowercase , _lowercase ):
__snake_case : Dict = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_lowercase , _lowercase ):
__snake_case : str = (num_attention_heads,) * len(self.down_block_types )
# down
__snake_case : Optional[int] = []
__snake_case : Union[str, Any] = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
__snake_case : Dict = output_channel
__snake_case : Union[str, Any] = block_out_channels[i]
__snake_case : Any = i == len(_lowercase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__snake_case : int = FlaxCrossAttnDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case : Union[str, Any] = FlaxDownBlockaD(
in_channels=_lowercase , out_channels=_lowercase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_lowercase )
__snake_case : List[Any] = down_blocks
# mid
__snake_case : List[Any] = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
__snake_case : List[Any] = []
__snake_case : Tuple = list(reversed(_lowercase ) )
__snake_case : List[str] = list(reversed(_lowercase ) )
__snake_case : Optional[Any] = list(reversed(_lowercase ) )
__snake_case : Optional[int] = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
__snake_case : Dict = output_channel
__snake_case : str = reversed_block_out_channels[i]
__snake_case : str = reversed_block_out_channels[min(i + 1 , len(_lowercase ) - 1 )]
__snake_case : Optional[int] = i == len(_lowercase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
__snake_case : Union[str, Any] = FlaxCrossAttnUpBlockaD(
in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
__snake_case : Union[str, Any] = FlaxUpBlockaD(
in_channels=_lowercase , out_channels=_lowercase , prev_output_channel=_lowercase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(_lowercase )
__snake_case : Dict = output_channel
__snake_case : List[str] = up_blocks
# out
__snake_case : Union[str, Any] = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
__snake_case : List[str] = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Optional[int] , __a : List[Any] , __a : Any , __a : Optional[Any] , __a : Optional[Any]=None , __a : List[str]=None , __a : bool = True , __a : bool = False , ) -> int:
'''simple docstring'''
# 1. time
if not isinstance(_lowercase , jnp.ndarray ):
__snake_case : Union[str, Any] = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_lowercase , jnp.ndarray ) and len(timesteps.shape ) == 0:
__snake_case : Optional[Any] = timesteps.astype(dtype=jnp.floataa )
__snake_case : Optional[int] = jnp.expand_dims(_lowercase , 0 )
__snake_case : List[str] = self.time_proj(_lowercase )
__snake_case : Optional[Any] = self.time_embedding(_lowercase )
# 2. pre-process
__snake_case : Any = jnp.transpose(_lowercase , (0, 2, 3, 1) )
__snake_case : int = self.conv_in(_lowercase )
# 3. down
__snake_case : Union[str, Any] = (sample,)
for down_block in self.down_blocks:
if isinstance(_lowercase , _lowercase ):
__snake_case , __snake_case : Union[str, Any] = down_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
else:
__snake_case , __snake_case : Dict = down_block(_lowercase , _lowercase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
__snake_case : List[str] = ()
for down_block_res_sample, down_block_additional_residual in zip(
_lowercase , _lowercase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
__snake_case : str = new_down_block_res_samples
# 4. mid
__snake_case : List[Any] = self.mid_block(_lowercase , _lowercase , _lowercase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
__snake_case : Optional[int] = down_block_res_samples[-(self.layers_per_block + 1) :]
__snake_case : Dict = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(_lowercase , _lowercase ):
__snake_case : Any = up_block(
_lowercase , temb=_lowercase , encoder_hidden_states=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train , )
else:
__snake_case : int = up_block(_lowercase , temb=_lowercase , res_hidden_states_tuple=_lowercase , deterministic=not train )
# 6. post-process
__snake_case : Union[str, Any] = self.conv_norm_out(_lowercase )
__snake_case : Tuple = nn.silu(_lowercase )
__snake_case : Optional[int] = self.conv_out(_lowercase )
__snake_case : Optional[int] = jnp.transpose(_lowercase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
return FlaxUNetaDConditionOutput(sample=_lowercase )
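# Hedged usage sketch, kept as comments so nothing runs at import time; variable
# names and shapes below are assumptions for illustration only:
#   unet = <the module defined above>
#   params = unet.init_weights(jax.random.PRNGKey(0))   # the first method above
#   out = unet.apply({"params": params}, sample, timesteps, encoder_hidden_states)
#   out.sample keeps the same (batch, channels, height, width) layout as `sample`.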
| 369 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
A__ : Tuple = pytest.mark.integration
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : Any ) -> Tuple:
'''simple docstring'''
__snake_case : Dict = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(__a ) for x in np.arange(30 ).tolist()]} )
return dset
def A_ ( self : Union[str, Any] ) -> List[Any]:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
__snake_case : Dict = dset.map(
lambda __a , __a : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__a , keep_in_memory=__a )
__snake_case : List[Any] = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def A_ ( self : Tuple ) -> Any:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__snake_case , __snake_case : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : List[Any] ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__snake_case , __snake_case : str = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def A_ ( self : Union[str, Any] ) -> Dict:
'''simple docstring'''
__snake_case : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(__a , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
__snake_case : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : Any = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__snake_case : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=__a )
__snake_case , __snake_case : str = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : str ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__snake_case : Dict = np.zeros(5 , dtype=np.floataa )
__snake_case : List[str] = 1
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertRaises(__a , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__snake_case : List[str] = np.eye(5 , dtype=np.floataa )[::-1]
__snake_case , __snake_case : Dict = index.search_batch(__a )
self.assertRaises(__a , index.search_batch , queries[0] )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : List[Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __a )
def A_ ( self : int ) -> int:
'''simple docstring'''
import faiss
__snake_case : int = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__snake_case : List[str] = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__a ):
__snake_case : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def A_ ( self : str ) -> Dict:
'''simple docstring'''
import faiss
__snake_case : Tuple = faiss.IndexFlat(5 )
__snake_case : List[Any] = FaissIndex(custom_index=__a )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def A_ ( self : List[Any] ) -> int:
'''simple docstring'''
import faiss
__snake_case : Optional[Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__a ) as tmp_file:
index.save(tmp_file.name )
__snake_case : List[Any] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__snake_case : List[Any] = np.zeros(5 , dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : int = index.search(__a )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def a_ ( _UpperCAmelCase : str ) -> Optional[int]:
import faiss
__snake_case : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 ,dtype=np.floataa ) )
__snake_case : Dict = 'index.faiss'
__snake_case : Any = f'''mock://{index_name}'''
index.save(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = FaissIndex.load(_UpperCAmelCase ,storage_options=mockfs.storage_options )
__snake_case : Any = np.zeros(5 ,dtype=np.floataa )
__snake_case : Any = 1
__snake_case , __snake_case : Tuple = index.search(_UpperCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class snake_case__ ( SCREAMING_SNAKE_CASE_ ):
def A_ ( self : List[str] ) -> List[str]:
'''simple docstring'''
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__snake_case : int = Elasticsearch()
__snake_case : Dict = {'acknowledged': True}
__snake_case : List[Any] = ElasticSearchIndex(es_client=__a )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__snake_case : Optional[Any] = 'foo'
__snake_case : int = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : List[Any] = index.search(__a )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__snake_case : Dict = 'foo'
__snake_case : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__snake_case , __snake_case : Optional[Any] = index.search(__a , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__snake_case : List[Any] = ['foo', 'bar', 'foobar']
__snake_case : str = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : Any = index.search_batch(__a )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Tuple = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
# batched queries with timeout
__snake_case : Tuple = ['foo', 'bar', 'foobar']
__snake_case : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__snake_case , __snake_case : int = index.search_batch(__a , request_timeout=30 )
__snake_case : Any = [scores[0] for scores in total_scores]
__snake_case : Dict = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__a ) , 0 )
self.assertListEqual([1, 1, 1] , __a )
| 0 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase_ = {
'configuration_clap': [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapAudioConfig',
'ClapConfig',
'ClapTextConfig',
],
'processing_clap': ['ClapProcessor'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase_ = [
'CLAP_PRETRAINED_MODEL_ARCHIVE_LIST',
'ClapModel',
'ClapPreTrainedModel',
'ClapTextModel',
'ClapTextModelWithProjection',
'ClapAudioModel',
'ClapAudioModelWithProjection',
]
lowerCamelCase_ = ['ClapFeatureExtractor']
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
lowerCamelCase_ = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
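# The module above is import-cheap by design: a hedged sketch (module path
# assumed for illustration) of the lazy behaviour _LazyModule provides:
#   from transformers.models import clap   # does not import torch yet
#   clap.ClapModel                          # first attribute access triggers it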
| 268 | """simple docstring"""
def a_ ( lowerCamelCase ):
return str(lowerCamelCase ) == str(lowerCamelCase )[::-1]
def a_ ( lowerCamelCase ):
return int(lowerCamelCase ) + int(str(lowerCamelCase )[::-1] )
def a_ ( lowerCamelCase = 1_0_0_0_0 ):
UpperCAmelCase__ = []
for num in range(1 , lowerCamelCase ):
UpperCAmelCase__ = 0
UpperCAmelCase__ = num
while iterations < 5_0:
UpperCAmelCase__ = sum_reverse(lowerCamelCase )
iterations += 1
if is_palindrome(lowerCamelCase ):
break
else:
lychrel_nums.append(lowerCamelCase )
return len(lowerCamelCase )
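# Worked example: 196 is the classic Lychrel candidate. Reverse-and-add gives
# 196 + 691 = 887, then 887 + 788 = 1675, then 1675 + 5761 = 7436, ... and no
# palindrome appears within the 50-iteration budget, so 196 is counted.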
if __name__ == "__main__":
print(F"""{solution() = }""")
| 98 | 0 |
'''simple docstring'''
from __future__ import annotations
# This is the precision for this function which can be altered.
# It is recommended for users to keep this number greater than or equal to 10.
UpperCAmelCase = 10
def _snake_case ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
for i in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if array[i] == target:
return i
return -1
def _snake_case ( _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
lowerCAmelCase = 0
lowerCAmelCase = len(_SCREAMING_SNAKE_CASE )
while left <= right:
if right - left < precision:
return lin_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = (left + right) // 3 + 1
lowerCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
lowerCAmelCase = one_third - 1
elif array[two_third] < target:
lowerCAmelCase = two_third + 1
else:
lowerCAmelCase = one_third + 1
lowerCAmelCase = two_third - 1
else:
return -1
def _snake_case ( _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : int , _SCREAMING_SNAKE_CASE : list[int] , _SCREAMING_SNAKE_CASE : int ) -> int:
"""simple docstring"""
if left < right:
if right - left < precision:
return lin_search(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase = (left + right) // 3 + 1
lowerCAmelCase = 2 * (left + right) // 3 + 1
if array[one_third] == target:
return one_third
elif array[two_third] == target:
return two_third
elif target < array[one_third]:
return rec_ternary_search(_SCREAMING_SNAKE_CASE , one_third - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif array[two_third] < target:
return rec_ternary_search(two_third + 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
return rec_ternary_search(one_third + 1 , two_third - 1 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
return -1
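# Worked trace (input assumed): searching for 5 in [1, 2, ..., 9] computes
# one_third = 3 (value 4) and two_third = 6 (value 7); since 4 < 5 < 7, the
# search narrows to the middle segment, indices 4..5 (values 5 and 6).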
if __name__ == "__main__":
import doctest
doctest.testmod()
UpperCAmelCase = input('Enter numbers separated by comma:\n').strip()
UpperCAmelCase = [int(item.strip()) for item in user_input.split(',')]
assert collection == sorted(collection), F"List must be ordered.\n{collection}."
UpperCAmelCase = int(input('Enter the number to be found in the list:\n').strip())
UpperCAmelCase = ite_ternary_search(collection, target)
UpperCAmelCase = rec_ternary_search(0, len(collection) - 1, collection, target)
if resulta != -1:
print(F'''Iterative search: {target} found at positions: {resulta}''')
print(F'''Recursive search: {target} found at positions: {resulta}''')
else:
print('Not found') | 187 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase = logging.get_logger(__name__)
class __snake_case( _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : Optional[int] = "timm_backbone"
def __init__( self , A_=None , A_=3 , A_=True , A_=True , A_=None , **A_ , ) -> int:
super().__init__(**A_ )
lowerCAmelCase = backbone
lowerCAmelCase = num_channels
lowerCAmelCase = features_only
lowerCAmelCase = use_pretrained_backbone
lowerCAmelCase = True
lowerCAmelCase = out_indices if out_indices is not None else (-1,) | 187 | 1 |
'''simple docstring'''
import os
from datetime import datetime as dt
from github import Github
A__ : Optional[Any] = [
"""good first issue""",
"""feature request""",
"""wip""",
]
def UpperCAmelCase__ ( ) -> Any:
__lowerCamelCase : int = Github(os.environ['GITHUB_TOKEN'] )
__lowerCamelCase : Union[str, Any] = g.get_repo('huggingface/accelerate' )
__lowerCamelCase : Dict = repo.get_issues(state='open' )
for issue in open_issues:
__lowerCamelCase : List[str] = sorted([comment for comment in issue.get_comments()] , key=lambda UpperCAmelCase_ : i.created_at , reverse=UpperCAmelCase_ )
__lowerCamelCase : Optional[Any] = comments[0] if len(UpperCAmelCase_ ) > 0 else None
__lowerCamelCase : List[Any] = dt.utcnow()
__lowerCamelCase : Dict = (current_time - issue.updated_at).days
__lowerCamelCase : Tuple = (current_time - issue.created_at).days
if (
last_comment is not None
and last_comment.user.login == "github-actions[bot]"
and days_since_updated > 7
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Close issue since it has been 7 days of inactivity since bot mention.
issue.edit(state='closed' )
elif (
days_since_updated > 23
and days_since_creation >= 30
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels() )
):
# Add stale comment
issue.create_comment(
'This issue has been automatically marked as stale because it has not had '
'recent activity. If you think this still needs to be addressed '
'please comment on this thread.\n\nPlease note that issues that do not follow the '
'[contributing guidelines](https://github.com/huggingface/accelerate/blob/main/CONTRIBUTING.md) '
'are likely to be ignored.' )
if __name__ == "__main__":
main()
| 185 |
'''simple docstring'''
from numpy import exp, pi, sqrt
def UpperCAmelCase__ ( UpperCAmelCase_ : Optional[int] , UpperCAmelCase_ : float = 0.0 , UpperCAmelCase_ : float = 1.0 ) -> int:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
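# Spot check: with mu = 0 and sigma = 1, the density at x = 0 is
# 1 / sqrt(2 * pi) ≈ 0.3989.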
if __name__ == "__main__":
import doctest
doctest.testmod()
| 185 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_flax_available, is_torch_available
lowerCAmelCase : Tuple = {
"""configuration_gpt_neo""": ["""GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """GPTNeoConfig""", """GPTNeoOnnxConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : Tuple = [
"""GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GPTNeoForCausalLM""",
"""GPTNeoForQuestionAnswering""",
"""GPTNeoForSequenceClassification""",
"""GPTNeoForTokenClassification""",
"""GPTNeoModel""",
"""GPTNeoPreTrainedModel""",
"""load_tf_weights_in_gpt_neo""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase : int = [
"""FlaxGPTNeoForCausalLM""",
"""FlaxGPTNeoModel""",
"""FlaxGPTNeoPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_gpt_neo import GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoConfig, GPTNeoOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neo import (
GPT_NEO_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoForCausalLM,
GPTNeoForQuestionAnswering,
GPTNeoForSequenceClassification,
GPTNeoForTokenClassification,
GPTNeoModel,
GPTNeoPreTrainedModel,
load_tf_weights_in_gpt_neo,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_gpt_neo import FlaxGPTNeoForCausalLM, FlaxGPTNeoModel, FlaxGPTNeoPreTrainedModel
else:
import sys
lowerCAmelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 168 |
"""simple docstring"""
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def a__ ( snake_case__ ) -> Tuple:
if hor == 1_28:
lowerCamelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase = (32, 1_28, 2_56)
lowerCamelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""")
elif hor == 32:
lowerCamelCase = ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""")
lowerCamelCase = (32, 64, 1_28, 2_56)
lowerCamelCase = ("""UpResnetBlock1D""", """UpResnetBlock1D""", """UpResnetBlock1D""")
lowerCamelCase = torch.load(F'/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch' )
lowerCamelCase = model.state_dict()
lowerCamelCase = {
"""down_block_types""": down_block_types,
"""block_out_channels""": block_out_channels,
"""up_block_types""": up_block_types,
"""layers_per_block""": 1,
"""use_timestep_embedding""": True,
"""out_block_type""": """OutConv1DBlock""",
"""norm_num_groups""": 8,
"""downsample_each_block""": False,
"""in_channels""": 14,
"""out_channels""": 14,
"""extra_in_channels""": 0,
"""time_embedding_type""": """positional""",
"""flip_sin_to_cos""": False,
"""freq_shift""": 1,
"""sample_size""": 6_55_36,
"""mid_block_type""": """MidResTemporalBlock1D""",
"""act_fn""": """mish""",
}
lowerCamelCase = UNetaDModel(**snake_case__ )
print(F'length of state dict: {len(state_dict.keys() )}' )
print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
lowerCamelCase = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCamelCase = state_dict.pop(snake_case__ )
hf_value_function.load_state_dict(snake_case__ )
torch.save(hf_value_function.state_dict() , F'hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin' )
with open(F'hub/hopper-medium-v2/unet/hor{hor}/config.json' , """w""" ) as f:
json.dump(snake_case__ , snake_case__ )
def value_function( ):
    config = {
        """in_channels""": 14,
        """down_block_types""": ("""DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D""", """DownResnetBlock1D"""),
        """up_block_types""": (),
        """out_block_type""": """ValueFunction""",
        """mid_block_type""": """ValueFunctionMidBlock1D""",
        """block_out_channels""": (32, 64, 1_28, 2_56),
        """layers_per_block""": 1,
        """downsample_each_block""": True,
        """sample_size""": 6_55_36,
        """out_channels""": 14,
        """extra_in_channels""": 0,
        """time_embedding_type""": """positional""",
        """use_timestep_embedding""": True,
        """flip_sin_to_cos""": False,
        """freq_shift""": 1,
        """norm_num_groups""": 8,
        """act_fn""": """mish""",
    }
    # this checkpoint stores the raw state dict rather than a module
    state_dict = torch.load("""/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch""" )
    hf_value_function = UNet1DModel(**config )
    print(F'length of state dict: {len(state_dict.keys() )}' )
    print(F'length of value function dict: {len(hf_value_function.state_dict().keys() )}' )
    mapping = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
    for k, v in mapping.items():
        state_dict[v] = state_dict.pop(k )
    hf_value_function.load_state_dict(state_dict )
    torch.save(hf_value_function.state_dict() , """hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin""" )
    with open("""hub/hopper-medium-v2/value_function/config.json""" , """w""" ) as f:
        json.dump(config , f )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function()
| 168 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_UpperCAmelCase : Tuple = {
"""configuration_encodec""": [
"""ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""EncodecConfig""",
],
"""feature_extraction_encodec""": ["""EncodecFeatureExtractor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_UpperCAmelCase : Optional[int] = [
"""ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""EncodecModel""",
"""EncodecPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_encodec import (
ENCODEC_PRETRAINED_CONFIG_ARCHIVE_MAP,
EncodecConfig,
)
from .feature_extraction_encodec import EncodecFeatureExtractor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_encodec import (
ENCODEC_PRETRAINED_MODEL_ARCHIVE_LIST,
EncodecModel,
EncodecPreTrainedModel,
)
else:
import sys
_UpperCAmelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 174 |
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
_SCREAMING_SNAKE_CASE = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig ( folder_based_builder.FolderBasedBuilderConfig ):
    drop_labels: bool = None
    drop_metadata: bool = None
class AudioFolder ( folder_based_builder.FolderBasedBuilder ):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: list[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio" , label_column="label" )
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
| 158 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {
'facebook/timesformer': 'https://huggingface.co/facebook/timesformer/resolve/main/config.json',
}
class a_ ( PretrainedConfig ):
    model_type = """timesformer"""
    def __init__( self , image_size=224 , patch_size=16 , num_channels=3 , num_frames=8 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , initializer_range=0.0_2 , layer_norm_eps=1e-6 , qkv_bias=True , attention_type="divided_space_time" , drop_path_rate=0 , **kwargs , ) -> None:
        """simple docstring"""
        super().__init__(**kwargs )
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.num_frames = num_frames
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.qkv_bias = qkv_bias
        self.attention_type = attention_type
        self.drop_path_rate = drop_path_rate
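# Illustrative usage sketch (added for orientation, not part of the original
# file): instantiate the config with keyword overrides; every other field
# keeps the defaults from the signature above.
if __name__ == "__main__":
    config = a_(num_frames=16 , attention_type="divided_space_time" )
    print(config.hidden_size )  # 768 by default
    print(config.num_frames )  # 16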
| 369 |
'''simple docstring'''
from timeit import timeit
def get_set_bits_count_using_brian_kernighans_algorithm( number: int )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        number &= number - 1
        result += 1
    return result
def get_set_bits_count_using_modulo_operator( number: int )-> int:
    if number < 0:
        raise ValueError("""the value of input must not be negative""" )
    result = 0
    while number:
        if number % 2 == 1:
            result += 1
        number >>= 1
    return result
def benchmark( )-> None:
    def do_benchmark(number: int ) -> None:
        setup = """import __main__ as z"""
        print(F"Benchmark when {number = }:" )
        print(F"{get_set_bits_count_using_modulo_operator(number ) = }" )
        timing = timeit(F"z.get_set_bits_count_using_modulo_operator({number})" , setup=setup )
        print(F"timeit() runs in {timing} seconds" )
        print(F"{get_set_bits_count_using_brian_kernighans_algorithm(number ) = }" )
        timing = timeit(
            F"z.get_set_bits_count_using_brian_kernighans_algorithm({number})" , setup=setup , )
        print(F"timeit() runs in {timing} seconds" )
    for number in (25, 37, 58, 0):
        do_benchmark(number )
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
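    # Quick sanity checks (illustrative additions, not from the original file):
    # 25 is 0b11001 and 37 is 0b100101, so both counters report 3 set bits.
    assert get_set_bits_count_using_brian_kernighans_algorithm(25 ) == 3
    assert get_set_bits_count_using_modulo_operator(37 ) == 3
    assert get_set_bits_count_using_modulo_operator(0 ) == 0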
| 183 | 0 |
import itertools
import string
from collections.abc import Generator, Iterable
def chunker( seq: Iterable[str] , size: int ) -> Generator[tuple[str, ...], None, None]:
    """simple docstring"""
    it = iter(seq )
    while True:
        chunk = tuple(itertools.islice(it , size ) )
        if not chunk:
            return
        yield chunk
def prepare_input( dirty: str ) -> str:
    """simple docstring"""
    dirty = ''.join([c.upper() for c in dirty if c in string.ascii_letters] )
    clean = ''
    if len(dirty ) < 2:
        return dirty
    for i in range(len(dirty ) - 1 ):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"
    clean += dirty[-1]
    if len(clean ) & 1:
        clean += "X"
    return clean
def generate_table( key: str ) -> list[str]:
    """simple docstring"""
    alphabet = 'ABCDEFGHIKLMNOPQRSTUVWXYZ'
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []
    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char )
    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char )
    return table
def encode( plaintext: str , key: str ) -> str:
    """simple docstring"""
    table = generate_table(key )
    plaintext = prepare_input(plaintext )
    ciphertext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(plaintext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            ciphertext += table[rowa * 5 + (cola + 1) % 5]
            ciphertext += table[rowb * 5 + (colb + 1) % 5]
        elif cola == colb:
            ciphertext += table[((rowa + 1) % 5) * 5 + cola]
            ciphertext += table[((rowb + 1) % 5) * 5 + colb]
        else:  # rectangle
            ciphertext += table[rowa * 5 + colb]
            ciphertext += table[rowb * 5 + cola]
    return ciphertext
def decode( ciphertext: str , key: str ) -> str:
    """simple docstring"""
    table = generate_table(key )
    plaintext = ''
    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for chara, charb in chunker(ciphertext , 2 ):
        rowa, cola = divmod(table.index(chara ) , 5 )
        rowb, colb = divmod(table.index(charb ) , 5 )
        if rowa == rowb:
            plaintext += table[rowa * 5 + (cola - 1) % 5]
            plaintext += table[rowb * 5 + (colb - 1) % 5]
        elif cola == colb:
            plaintext += table[((rowa - 1) % 5) * 5 + cola]
            plaintext += table[((rowb - 1) % 5) * 5 + colb]
        else:  # rectangle
            plaintext += table[rowa * 5 + colb]
            plaintext += table[rowb * 5 + cola]
    return plaintext
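# Usage sketch (illustrative, not part of the original snippet): prepare_input
# pads the doubled L in "HELLO" with X and pads the odd-length result, so the
# round trip recovers "HELXLOWORLDX" rather than the raw plaintext.
if __name__ == "__main__":
    ciphertext = encode("Hello World" , "marvin" )
    print(ciphertext )
    assert decode(ciphertext , "marvin" ) == "HELXLOWORLDX"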
| 32 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
a_ = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = ["""PerceiverFeatureExtractor"""]
a_ = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a_ = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
a_ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 179 | 0 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors
def mobius( n ) -> int:
    '''simple docstring'''
    factors = prime_factors(n )
    if is_square_free(factors ):
        return -1 if len(factors ) % 2 else 1
    return 0
if __name__ == "__main__":
import doctest
doctest.testmod()
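    # Illustrative spot checks (not from the original file): mobius(2) == -1
    # (one prime factor), mobius(4) == 0 (4 is not square-free) and
    # mobius(6) == 1 (two distinct prime factors).
    assert mobius(2 ) == -1
    assert mobius(4 ) == 0
    assert mobius(6 ) == 1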
| 323 |
from __future__ import annotations
from typing import Any
class __A :
    def __init__( self , num_of_nodes : int ):
        self.m_num_of_nodes = num_of_nodes
        self.m_edges: list[list[int]] = []
        self.m_component: dict[int, int] = {}
    def add_edge( self , u_node : int , v_node : int , weight : int ):
        self.m_edges.append([u_node, v_node, weight] )
    def find_component( self , u_node : int ):
        if self.m_component[u_node] == u_node:
            return u_node
        return self.find_component(self.m_component[u_node] )
    def set_component( self , u_node : int ):
        if self.m_component[u_node] != u_node:
            for k in self.m_component:
                self.m_component[k] = self.find_component(k )
    def union( self , component_size : list[int] , u_node : int , v_node : int ):
        if component_size[u_node] <= component_size[v_node]:
            self.m_component[u_node] = v_node
            component_size[v_node] += component_size[u_node]
            self.set_component(u_node )
        elif component_size[u_node] >= component_size[v_node]:
            self.m_component[v_node] = self.find_component(u_node )
            component_size[u_node] += component_size[v_node]
            self.set_component(v_node )
    def boruvka( self ):
        component_size = []
        mst_weight = 0
        minimum_weight_edge: list[Any] = [-1] * self.m_num_of_nodes
        # A list of components (initialized to all of the nodes)
        for node in range(self.m_num_of_nodes ):
            self.m_component.update({node: node} )
            component_size.append(1 )
        num_of_components = self.m_num_of_nodes
        while num_of_components > 1:
            for edge in self.m_edges:
                u, v, w = edge
                u_component = self.m_component[u]
                v_component = self.m_component[v]
                if u_component != v_component:
                    for component in (u_component, v_component):
                        if (
                            minimum_weight_edge[component] == -1
                            or minimum_weight_edge[component][2] > w
                        ):
                            minimum_weight_edge[component] = [u, v, w]
            for edge in minimum_weight_edge:
                if isinstance(edge , list ):
                    u, v, w = edge
                    u_component = self.m_component[u]
                    v_component = self.m_component[v]
                    if u_component != v_component:
                        mst_weight += w
                        self.union(component_size , u_component , v_component )
                        print(f"Added edge [{u} - {v}]\nAdded weight: {w}\n" )
                        num_of_components -= 1
            minimum_weight_edge = [-1] * self.m_num_of_nodes
        print(f"The total weight of the minimal spanning tree is: {mst_weight}" )
def SCREAMING_SNAKE_CASE__ ( ) -> None:
'''simple docstring'''
if __name__ == "__main__":
import doctest
doctest.testmod()
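    # Minimal usage sketch (illustrative, not from the original file): a 4-node
    # graph whose minimum spanning tree keeps the edges of weight 1, 2 and 3.
    g = __A(4 )
    g.add_edge(0 , 1 , 1 )
    g.add_edge(1 , 2 , 2 )
    g.add_edge(2 , 3 , 3 )
    g.add_edge(0 , 3 , 4 )
    g.boruvka()  # the printed total weight should be 6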
| 323 | 1 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ):
    """simple docstring"""
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"""SPARK_PARTITION_ID() = {part_id}""" ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f"""{part_id}_{row_idx}""", row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(100 ).repartition(1 )
A__ = Spark(lowercase_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(10 ).repartition(2 )
A__ = [1, 0]
A__ = _generate_iterable_examples(lowercase_ , lowercase_ ) # Reverse the partitions.
A__ = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase_ , lowercase_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
A__ , A__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Dict:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(10 ).repartition(1 )
A__ = SparkExamplesIterable(lowercase_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowercase_ ):
assert row_id == f"""0_{i}"""
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Optional[Any]:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('''numpy.random.Generator''' ) as generator_mock:
A__ = lambda lowercase_ : x.reverse()
A__ = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase_ , [2, 1, 0] )
A__ = SparkExamplesIterable(lowercase_ ).shuffle_data_sources(lowercase_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowercase_ ):
A__ , A__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> Tuple:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
A__ = SparkExamplesIterable(lowercase_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
A__ = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(lowercase_ ):
A__ , A__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A__ = SparkExamplesIterable(lowercase_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
A__ = _get_expected_row_ids_and_row_dicts_for_partition_order(lowercase_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(lowercase_ ):
A__ , A__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ) -> str:
"""simple docstring"""
A__ = pyspark.sql.SparkSession.builder.master('''local[*]''' ).appName('''pyspark''' ).getOrCreate()
A__ = spark.range(100 ).repartition(1 )
A__ = Spark(lowercase_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 14 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('''case''' , CASES )
def test_import_parsing( tmp_path , case ):
    """simple docstring"""
    tmp_file_path = os.path.join(tmp_path , '''test_file.py''' )
    with open(tmp_file_path , '''w''' ) as _tmp_file:
        _tmp_file.write(case )
    parsed_imports = get_imports(tmp_file_path )
    assert parsed_imports == ["os"]
| 14 | 1 |
'''simple docstring'''
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 5_0_0_0_0
SMALL_TEST = 5_0_0_0
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read(dataset , length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating( ):
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
        (read, {"""length""": SMALL_TEST}),
        (read, {"""length""": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
        (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
        (read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
    ]
    functions_shuffled = [
        (read, {"""length""": SMALL_TEST}),
        (read, {"""length""": SPEED_TEST_N_EXAMPLES}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0}),
        (read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_0_0_0}),
        (read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0}),
        (read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_0_0_0}),
    ]
    with tempfile.TemporaryDirectory() as tmp_dir:
        print('''generating dataset''')
        features = datasets.Features(
            {'''list''': datasets.Sequence(datasets.Value('''float32''')), '''numbers''': datasets.Value('''float32''')})
        dataset = generate_example_dataset(
            os.path.join(tmp_dir, '''dataset.arrow'''), features, num_examples=SPEED_TEST_N_EXAMPLES, seq_shapes={'''list''': (1_0_0,)}, )
        print('''first set of iterations''')
        for func, kwargs in functions:
            print(func.__name__, str(kwargs))
            times[func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(dataset, **kwargs)
        print('''shuffling dataset''')
        dataset = dataset.shuffle()
        print('''Second set of iterations (after shuffling''')
        for func, kwargs in functions_shuffled:
            print('''shuffled ''', func.__name__, str(kwargs))
            times['''shuffled ''' + func.__name__ + ''' ''' + ''' '''.join(str(v) for v in kwargs.values())] = func(
                dataset, **kwargs)
    with open(RESULTS_FILE_PATH, '''wb''') as f:
        f.write(json.dumps(times).encode('''utf-8'''))
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
| 358 |
'''simple docstring'''
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class a__ ( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = DebertaTokenizer
    test_rust_tokenizer = True
    rust_tokenizer_class = DebertaTokenizerFast
    def setUp (self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
        self.special_tokens_map = {'''unk_token''': '''[UNK]'''}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
        with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '''\n''' )
        with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
            fp.write('''\n'''.join(merges ) )
    def get_tokenizer (self , **kwargs ):
        kwargs.update(self.special_tokens_map )
        return self.tokenizer_class.from_pretrained(self.tmpdirname , **kwargs )
    def get_input_output_texts (self , tokenizer ):
        input_text = '''lower newer'''
        output_text = '''lower newer'''
        return input_text, output_text
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = '''lower newer'''
__lowerCAmelCase = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
__lowerCAmelCase = tokenizer.tokenize(__lowercase )
self.assertListEqual(__lowercase , __lowercase )
__lowerCAmelCase = tokens + [tokenizer.unk_token]
__lowerCAmelCase = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__lowercase ) , __lowercase )
def _snake_case (self ):
__lowerCAmelCase = self.get_tokenizer()
__lowerCAmelCase = tokenizer('''Hello''' , '''World''' )
__lowerCAmelCase = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __lowercase )
@slow
def _snake_case (self ):
__lowerCAmelCase = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = tokenizer.encode('''sequence builders''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__lowercase , add_prefix_space=__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase )
__lowerCAmelCase = tokenizer.build_inputs_with_special_tokens(__lowercase , __lowercase )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def _snake_case (self ):
__lowerCAmelCase = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
__lowerCAmelCase = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
__lowerCAmelCase = tokenizer(__lowercase , padding=__lowercase )
__lowerCAmelCase = [tokenizer.decode(__lowercase , skip_special_tokens=__lowercase ) for seq in encoding['''input_ids''']]
# fmt: off
__lowerCAmelCase = {
'''input_ids''': [
[1, 21_18, 1_11_26, 5_65, 35, 83, 2_51_91, 1_63, 1_88_54, 13, 1_21_56, 12, 1_61_01, 2_53_76, 1_38_07, 9, 2_22_05, 2_78_93, 16_35, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 21_18, 1_11_26, 5_65, 2_45_36, 80, 4_37_97, 48_78, 73_73, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_33, 78, 65, 16, 10, 37_24, 15_38, 3_31_83, 1_13_03, 4_37_97, 19_38, 4, 8_70, 2_41_65, 2_91_05, 5, 7_39, 3_26_44, 3_31_83, 1_13_03, 3_61_73, 88, 80, 6_50, 78_21, 4_59_40, 6, 52, 25_59, 5, 18_36, 9, 5, 73_97, 1_31_71, 31, 5, 18_36, 9, 3_26_44, 3_31_83, 1_13_03, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
__lowerCAmelCase = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __lowercase )
for expected, decoded in zip(__lowercase , __lowercase ):
self.assertEqual(__lowercase , __lowercase )
| 9 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"tokenizer_config_file": "tokenizer_config.json",
"merges_file": "merges.txt",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
),
},
"tokenizer_config_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
),
},
"merges_file": {
"facebook/s2t-wav2vec2-large-en-de": (
"https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
),
},
}
BPE_TOKEN_END_OF_WORD = "</w>"
BPE_TOKEN_MERGES = "@@ "
def get_pairs( word ):
    '''simple docstring'''
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1_0_2_4}
class _UpperCamelCase ( PreTrainedTokenizer ):
'''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["""input_ids""", """attention_mask"""]
    def __init__( self , vocab_file , bos_token="<s>" , pad_token="<pad>" , eos_token="</s>" , unk_token="<unk>" , do_lower_case=False , merges_file=None , **kwargs , ):
        super().__init__(
            unk_token=unk_token , bos_token=bos_token , eos_token=eos_token , pad_token=pad_token , do_lower_case=do_lower_case , **kwargs , )
        self.do_lower_case = do_lower_case
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding." )
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file , encoding="utf-8" ) as merges_handle:
                merges = merges_handle.read().split("\n" )[:-1]
            merges = [tuple(merge.split()[:2] ) for merge in merges]
            self.bpe_ranks = dict(zip(merges , range(len(merges ) ) ) )
            self.cache = {}
    @property
    def vocab_size( self ):
        return len(self.decoder )
    def get_vocab( self ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self , token ):
        # append the end-of-word marker to the last symbol of the token
        word = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_END_OF_WORD,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word )
        if not pairs:
            return token
        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first , second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j
                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        if word == "\n " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES
        if word.endswith(BPE_TOKEN_MERGES ):
            word = word.replace(BPE_TOKEN_MERGES , "" )
        word = word.replace(" " , BPE_TOKEN_MERGES )
        self.cache[token] = word
        return word
    def _tokenize( self , text ):
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding."
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding." )
        if self.do_lower_case:
            text = text.lower()
        text = text.split()
        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token ).split(" " ) ) )
        return split_tokens
    def _convert_token_to_id( self , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        result = self.decoder.get(index , self.unk_token )
        return result
    def convert_tokens_to_string( self , tokens ):
        string = " ".join(tokens )
        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_MERGES ) )
        return string
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merges_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)
        with open(merges_file , "w" , encoding="utf-8" ) as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return (vocab_file, merges_file)
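# Tiny standalone demonstration (illustrative, not part of the original file)
# of how the "@@ " continuation marker handled by convert_tokens_to_string
# glues subword tokens back together:
if __name__ == "__main__":
    demo_tokens = ["hel@@", "lo", "wor@@", "ld"]
    joined = " ".join(demo_tokens )  # "hel@@ lo wor@@ ld"
    print("".join(joined.split(BPE_TOKEN_MERGES ) ) )  # "hello world"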
| 57 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class _UpperCamelCase ( unittest.TestCase ):
'''simple docstring'''
__UpperCAmelCase : Union[str, Any] =MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
__UpperCAmelCase : Union[str, Any] =TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def snake_case ( self , __a , __a , __a ):
__lowerCAmelCase = TextaTextGenerationPipeline(model=__a , tokenizer=__a )
return generator, ["Something to write", "Something else"]
def snake_case ( self , __a , __a ):
__lowerCAmelCase = generator("Something there" )
self.assertEqual(__a , [{"generated_text": ANY(__a )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]["generated_text"].startswith("Something there" ) )
__lowerCAmelCase = generator(["This is great !", "Something else"] , num_return_sequences=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
__lowerCAmelCase = generator(
["This is great !", "Something else"] , num_return_sequences=2 , batch_size=2 , do_sample=__a )
self.assertEqual(
__a , [
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
[{"generated_text": ANY(__a )}, {"generated_text": ANY(__a )}],
] , )
with self.assertRaises(__a ):
generator(4 )
@require_torch
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="pt" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
__lowerCAmelCase = 3
__lowerCAmelCase = generator(
"Something there" , num_return_sequences=__a , num_beams=__a , )
__lowerCAmelCase = [
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": "Beide Beide Beide Beide Beide Beide Beide Beide"},
{"generated_text": ""},
]
self.assertEqual(__a , __a )
__lowerCAmelCase = generator("This is a test" , do_sample=__a , num_return_sequences=2 , return_tensors=__a )
self.assertEqual(
__a , [
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
] , )
__lowerCAmelCase = generator.model.config.eos_token_id
__lowerCAmelCase = "<pad>"
__lowerCAmelCase = generator(
["This is a test", "This is a second test"] , do_sample=__a , num_return_sequences=2 , batch_size=2 , return_tensors=__a , )
self.assertEqual(
__a , [
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
[
{"generated_token_ids": ANY(torch.Tensor )},
{"generated_token_ids": ANY(torch.Tensor )},
],
] , )
@require_tf
def snake_case ( self ):
__lowerCAmelCase = pipeline("text2text-generation" , model="patrickvonplaten/t5-tiny-random" , framework="tf" )
# do_sample=False necessary for reproducibility
__lowerCAmelCase = generator("Something there" , do_sample=__a )
self.assertEqual(__a , [{"generated_text": ""}] )
| 57 | 1 |
from __future__ import annotations
from collections.abc import MutableSequence
class Polynomial :
    """simple docstring"""
    def __init__( self , degree : int , coefficients : MutableSequence[float] ) -> None:
        if len(coefficients ) != degree + 1:
            raise ValueError(
                """The number of coefficients should be equal to the degree + 1.""" )
        self.coefficients: list[float] = list(coefficients )
        self.degree = degree
    def __add__( self , polynomial_a : Polynomial ) -> Polynomial:
        if self.degree > polynomial_a.degree:
            coefficients = self.coefficients[:]
            for i in range(polynomial_a.degree + 1 ):
                coefficients[i] += polynomial_a.coefficients[i]
            return Polynomial(self.degree , coefficients )
        else:
            coefficients = polynomial_a.coefficients[:]
            for i in range(self.degree + 1 ):
                coefficients[i] += self.coefficients[i]
            return Polynomial(polynomial_a.degree , coefficients )
    def __sub__( self , polynomial_a : Polynomial ) -> Polynomial:
        return self + polynomial_a * Polynomial(0 , [-1] )
    def __neg__( self ) -> Polynomial:
        return Polynomial(self.degree , [-c for c in self.coefficients] )
    def __mul__( self , polynomial_a : Polynomial ) -> Polynomial:
        coefficients = [0] * (self.degree + polynomial_a.degree + 1)
        for i in range(self.degree + 1 ):
            for j in range(polynomial_a.degree + 1 ):
                coefficients[i + j] += (
                    self.coefficients[i] * polynomial_a.coefficients[j]
                )
        return Polynomial(self.degree + polynomial_a.degree , coefficients )
    def evaluate( self , substitution : int | float ) -> int | float:
        result = 0
        for i in range(self.degree + 1 ):
            result += self.coefficients[i] * (substitution**i)
        return result
    def __str__( self ) -> str:
        polynomial = """"""
        for i in range(self.degree , -1 , -1 ):
            if self.coefficients[i] == 0:
                continue
            elif self.coefficients[i] > 0:
                if polynomial:
                    polynomial += " + "
            else:
                polynomial += " - "
            if i == 0:
                polynomial += str(abs(self.coefficients[i] ) )
            elif i == 1:
                polynomial += str(abs(self.coefficients[i] ) ) + "x"
            else:
                polynomial += str(abs(self.coefficients[i] ) ) + "x^" + str(i )
        return polynomial
    def __repr__( self ) -> str:
        return self.__str__()
    def derivative( self ) -> Polynomial:
        coefficients = [0] * self.degree
        for i in range(self.degree ):
            coefficients[i] = self.coefficients[i + 1] * (i + 1)
        return Polynomial(self.degree - 1 , coefficients )
    def integral( self , constant : int | float = 0 ) -> Polynomial:
        coefficients = [0] * (self.degree + 2)
        coefficients[0] = constant
        for i in range(self.degree + 1 ):
            coefficients[i + 1] = self.coefficients[i] / (i + 1)
        return Polynomial(self.degree + 1 , coefficients )
    def __eq__( self , polynomial_a : object ) -> bool:
        if not isinstance(polynomial_a , Polynomial ):
            return False
        if self.degree != polynomial_a.degree:
            return False
        for i in range(self.degree + 1 ):
            if self.coefficients[i] != polynomial_a.coefficients[i]:
                return False
        return True
    def __ne__( self , polynomial_a : object ) -> bool:
        return not self.__eq__(polynomial_a )
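# Brief usage sketch (illustrative, not from the original file): coefficients
# are listed from the constant term upward, so Polynomial(2, [1, 2, 3]) models
# 3x^2 + 2x + 1.
if __name__ == "__main__":
    p = Polynomial(2 , [1, 2, 3] )
    print(p )              # 3x^2 + 2x + 1
    print(p.evaluate(2 ) ) # 3*4 + 2*2 + 1 = 17
    print(p.derivative() ) # 6x + 2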
| 277 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class lowercase ( unittest.TestCase):
"""simple docstring"""
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
UpperCAmelCase_= tempfile.mkdtemp()
UpperCAmelCase_= BlipImageProcessor()
UpperCAmelCase_= GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
UpperCAmelCase_= BlipaProcessor(__UpperCAmelCase , __UpperCAmelCase )
processor.save_pretrained(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : List[Any] , **__UpperCAmelCase : Union[str, Any] ) -> int:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).tokenizer
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] , **__UpperCAmelCase : str ) -> Optional[int]:
return AutoProcessor.from_pretrained(self.tmpdirname , **__UpperCAmelCase ).image_processor
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
UpperCAmelCase_= [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uinta )]
UpperCAmelCase_= [Image.fromarray(np.moveaxis(__UpperCAmelCase , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
UpperCAmelCase_= BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
UpperCAmelCase_= self.get_tokenizer(bos_token="""(BOS)""" , eos_token="""(EOS)""" )
UpperCAmelCase_= self.get_image_processor(do_normalize=__UpperCAmelCase , padding_value=1.0 )
UpperCAmelCase_= BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token="""(BOS)""" , eos_token="""(EOS)""" , do_normalize=__UpperCAmelCase , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , __UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= image_processor(__UpperCAmelCase , return_tensors="""np""" )
UpperCAmelCase_= processor(images=__UpperCAmelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= processor(text=__UpperCAmelCase )
UpperCAmelCase_= tokenizer(__UpperCAmelCase , return_token_type_ids=__UpperCAmelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
# test if it raises when no input is passed
with pytest.raises(__UpperCAmelCase ):
processor()
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
UpperCAmelCase_= processor.batch_decode(__UpperCAmelCase )
UpperCAmelCase_= tokenizer.batch_decode(__UpperCAmelCase )
self.assertListEqual(__UpperCAmelCase , __UpperCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
UpperCAmelCase_= self.get_image_processor()
UpperCAmelCase_= self.get_tokenizer()
UpperCAmelCase_= BlipaProcessor(tokenizer=__UpperCAmelCase , image_processor=__UpperCAmelCase )
UpperCAmelCase_= """lower newer"""
UpperCAmelCase_= self.prepare_image_inputs()
UpperCAmelCase_= processor(text=__UpperCAmelCase , images=__UpperCAmelCase )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ["""pixel_values""", """input_ids""", """attention_mask"""] )
| 277 | 1 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def add_two( x ) -> int:
    return x + 2
class UpperCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'x = 3'
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3} )
__SCREAMING_SNAKE_CASE = 'x = y'
__SCREAMING_SNAKE_CASE = {'y': 5}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 5, 'y': 5} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'y = add_two(x)'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
assert result is None
assert "tried to execute add_two" in out.out
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'x = 3'
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'test_dict = {\'x\': x, \'y\': add_two(x)}'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'add_two': add_two} , state=_A )
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
self.assertDictEqual(_A , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'x = 3\ny = 5'
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'y': 5} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'text = f\'This is x: {x}.\''
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(_A , {'x': 3, 'text': 'This is x: 3.'} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'if x <= 3:\n y = 2\nelse:\n y = 5'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(_A , {'x': 3, 'y': 2} )
__SCREAMING_SNAKE_CASE = {'x': 8}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(_A , {'x': 8, 'y': 5} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'test_list = [x, add_two(x)]'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'add_two': add_two} , state=_A )
self.assertListEqual(_A , [3, 5] )
self.assertDictEqual(_A , {'x': 3, 'test_list': [3, 5]} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'y = x'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {} , state=_A )
assert result == 3
self.assertDictEqual(_A , {'x': 3, 'y': 3} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'test_list = [x, add_two(x)]\ntest_list[1]'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'test_list': [3, 5]} )
__SCREAMING_SNAKE_CASE = 'test_dict = {\'x\': x, \'y\': add_two(x)}\ntest_dict[\'y\']'
__SCREAMING_SNAKE_CASE = {'x': 3}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'add_two': add_two} , state=_A )
assert result == 5
self.assertDictEqual(_A , {'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 'x = 0\nfor i in range(3):\n x = i'
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = evaluate(_A , {'range': range} , state=_A )
assert result == 2
self.assertDictEqual(_A , {'x': 2, 'i': 2} )
| 257 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class UpperCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : jnp.ndarray
UpperCamelCase__ : jnp.ndarray
class UpperCAmelCase_ ( nn.Module ):
'''simple docstring'''
UpperCamelCase__ : int
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
UpperCamelCase__ : jnp.dtype = jnp.floataa
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = []
for i in range(len(self.block_out_channels ) - 1 ):
__SCREAMING_SNAKE_CASE = self.block_out_channels[i]
__SCREAMING_SNAKE_CASE = self.block_out_channels[i + 1]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(_A )
__SCREAMING_SNAKE_CASE = blocks
__SCREAMING_SNAKE_CASE = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
for block in self.blocks:
__SCREAMING_SNAKE_CASE = block(_A )
__SCREAMING_SNAKE_CASE = nn.silu(_A )
__SCREAMING_SNAKE_CASE = self.conv_out(_A )
return embedding
@flax_register_to_config
class UpperCAmelCase_ ( nn.Module , UpperCamelCase_ , UpperCamelCase_ ):
'''simple docstring'''
UpperCamelCase__ : int = 32
UpperCamelCase__ : int = 4
UpperCamelCase__ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
UpperCamelCase__ : Union[bool, Tuple[bool]] = False
UpperCamelCase__ : Tuple[int] = (320, 640, 1280, 1280)
UpperCamelCase__ : int = 2
UpperCamelCase__ : Union[int, Tuple[int]] = 8
UpperCamelCase__ : Optional[Union[int, Tuple[int]]] = None
UpperCamelCase__ : int = 1280
UpperCamelCase__ : float = 0.0
UpperCamelCase__ : bool = False
UpperCamelCase__ : jnp.dtype = jnp.floataa
UpperCamelCase__ : bool = True
UpperCamelCase__ : int = 0
UpperCamelCase__ : str = "rgb"
UpperCamelCase__ : Tuple[int] = (16, 32, 96, 256)
def _A ( self , _A ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (1, self.in_channels, self.sample_size, self.sample_size)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.ones((1,) , dtype=jnp.intaa )
__SCREAMING_SNAKE_CASE = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = (1, 3, self.sample_size * 8, self.sample_size * 8)
__SCREAMING_SNAKE_CASE = jnp.zeros(_A , dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = jax.random.split(_A )
__SCREAMING_SNAKE_CASE = {'params': params_rng, 'dropout': dropout_rng}
return self.init(_A , _A , _A , _A , _A )["params"]
def _A ( self ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.block_out_channels
__SCREAMING_SNAKE_CASE = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
__SCREAMING_SNAKE_CASE = self.num_attention_heads or self.attention_head_dim
# input
__SCREAMING_SNAKE_CASE = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
__SCREAMING_SNAKE_CASE = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
__SCREAMING_SNAKE_CASE = FlaxTimestepEmbedding(_A , dtype=self.dtype )
__SCREAMING_SNAKE_CASE = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
__SCREAMING_SNAKE_CASE = self.only_cross_attention
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (only_cross_attention,) * len(self.down_block_types )
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE = (num_attention_heads,) * len(self.down_block_types )
# down
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = block_out_channels[0]
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
for i, down_block_type in enumerate(self.down_block_types ):
__SCREAMING_SNAKE_CASE = output_channel
__SCREAMING_SNAKE_CASE = block_out_channels[i]
__SCREAMING_SNAKE_CASE = i == len(_A ) - 1
if down_block_type == "CrossAttnDownBlock2D":
__SCREAMING_SNAKE_CASE = FlaxCrossAttnDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
__SCREAMING_SNAKE_CASE = FlaxDownBlockaD(
in_channels=_A , out_channels=_A , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(_A )
for _ in range(self.layers_per_block ):
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
if not is_final_block:
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(_A )
__SCREAMING_SNAKE_CASE = down_blocks
__SCREAMING_SNAKE_CASE = controlnet_down_blocks
# mid
__SCREAMING_SNAKE_CASE = block_out_channels[-1]
__SCREAMING_SNAKE_CASE = FlaxUNetMidBlockaDCrossAttn(
in_channels=_A , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
__SCREAMING_SNAKE_CASE = nn.Conv(
_A , kernel_size=(1, 1) , padding='VALID' , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , _A , _A , _A , _A , _A = 1.0 , _A = True , _A = False , ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
__SCREAMING_SNAKE_CASE = jnp.flip(_A , axis=1 )
# 1. time
if not isinstance(_A , jnp.ndarray ):
__SCREAMING_SNAKE_CASE = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(_A , jnp.ndarray ) and len(timesteps.shape ) == 0:
__SCREAMING_SNAKE_CASE = timesteps.astype(dtype=jnp.floataa )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(_A , 0 )
__SCREAMING_SNAKE_CASE = self.time_proj(_A )
__SCREAMING_SNAKE_CASE = self.time_embedding(_A )
# 2. pre-process
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.conv_in(_A )
__SCREAMING_SNAKE_CASE = jnp.transpose(_A , (0, 2, 3, 1) )
__SCREAMING_SNAKE_CASE = self.controlnet_cond_embedding(_A )
sample += controlnet_cond
# 3. down
__SCREAMING_SNAKE_CASE = (sample,)
for down_block in self.down_blocks:
if isinstance(_A , _A ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , _A , deterministic=not train )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = down_block(_A , _A , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
__SCREAMING_SNAKE_CASE = self.mid_block(_A , _A , _A , deterministic=not train )
# 5. controlnet blocks
__SCREAMING_SNAKE_CASE = ()
for down_block_res_sample, controlnet_block in zip(_A , self.controlnet_down_blocks ):
__SCREAMING_SNAKE_CASE = controlnet_block(_A )
controlnet_down_block_res_samples += (down_block_res_sample,)
__SCREAMING_SNAKE_CASE = controlnet_down_block_res_samples
__SCREAMING_SNAKE_CASE = self.controlnet_mid_block(_A )
# 6. scaling
__SCREAMING_SNAKE_CASE = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=_A , mid_block_res_sample=_A )
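# Illustrative consumption pattern for the residuals returned above. The argument
# names follow the diffusers Flax UNet API as an assumption, not this file:
#
#   down_res, mid_res = controlnet.apply(
#       {"params": cn_params}, latents, t, text_embeds, cond_image, return_dict=False)
#   out = unet.apply({"params": unet_params}, latents, t, text_embeds,
#                    down_block_additional_residuals=down_res,
#                    mid_block_additional_residual=mid_res)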
| 257 | 1 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __snake_case:
'''simple docstring'''
def __init__( self , A_ , A_=13 , A_=32 , A_=2 , A_=3 , A_=16 , A_=[1, 2, 1] , A_=[2, 2, 4] , A_=2 , A_=2.0 , A_=True , A_=0.0 , A_=0.0 , A_=0.1 , A_="gelu" , A_=False , A_=True , A_=0.0_2 , A_=1e-5 , A_=True , A_=None , A_=True , A_=10 , A_=8 , A_=["stage1", "stage2", "stage3"] , A_=[1, 2, 3] , ) -> List[str]:
lowerCAmelCase = parent
lowerCAmelCase = batch_size
lowerCAmelCase = image_size
lowerCAmelCase = patch_size
lowerCAmelCase = num_channels
lowerCAmelCase = embed_dim
lowerCAmelCase = depths
lowerCAmelCase = num_heads
lowerCAmelCase = window_size
lowerCAmelCase = mlp_ratio
lowerCAmelCase = qkv_bias
lowerCAmelCase = hidden_dropout_prob
lowerCAmelCase = attention_probs_dropout_prob
lowerCAmelCase = drop_path_rate
lowerCAmelCase = hidden_act
lowerCAmelCase = use_absolute_embeddings
lowerCAmelCase = patch_norm
lowerCAmelCase = layer_norm_eps
lowerCAmelCase = initializer_range
lowerCAmelCase = is_training
lowerCAmelCase = scope
lowerCAmelCase = use_labels
lowerCAmelCase = type_sequence_label_size
lowerCAmelCase = encoder_stride
lowerCAmelCase = out_features
lowerCAmelCase = out_indices
def __snake_case ( self ) -> Tuple:
lowerCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase = None
if self.use_labels:
lowerCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase = self.get_config()
return config, pixel_values, labels
def __snake_case ( self ) -> str:
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __snake_case ( self , A_ , A_ , A_ ) -> Tuple:
lowerCAmelCase = MaskFormerSwinModel(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
lowerCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __snake_case ( self , A_ , A_ , A_ ) -> Optional[Any]:
lowerCAmelCase = MaskFormerSwinBackbone(config=A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = model(A_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(A_ ):
lowerCAmelCase = ["""stem"""]
lowerCAmelCase = MaskFormerSwinBackbone(config=A_ )
def __snake_case ( self ) -> List[str]:
lowerCAmelCase = self.prepare_config_and_inputs()
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = config_and_inputs
lowerCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __snake_case( _lowerCAmelCase , _lowerCAmelCase , unittest.TestCase ):
'''simple docstring'''
UpperCAmelCase : int = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
UpperCAmelCase : str = {"feature-extraction": MaskFormerSwinModel} if is_torch_available() else {}
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Union[str, Any] = False
UpperCAmelCase : Dict = False
UpperCAmelCase : Optional[int] = False
UpperCAmelCase : Union[str, Any] = False
def __snake_case ( self ) -> Dict:
lowerCAmelCase = MaskFormerSwinModelTester(self )
lowerCAmelCase = ConfigTester(self , config_class=A_ , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def __snake_case ( self ) -> List[Any]:
pass
def __snake_case ( self ) -> Tuple:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __snake_case ( self ) -> List[Any]:
return
def __snake_case ( self ) -> Union[str, Any]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*A_ )
@unittest.skip("""Swin does not use inputs_embeds""" )
def __snake_case ( self ) -> str:
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def __snake_case ( self ) -> Optional[int]:
pass
def __snake_case ( self ) -> Optional[int]:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(A_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(A_ , nn.Linear ) )
def __snake_case ( self ) -> str:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(A_ )
lowerCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase = [*signature.parameters.keys()]
lowerCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , A_ )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def __snake_case ( self ) -> int:
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def __snake_case ( self ) -> List[Any]:
pass
def __snake_case ( self , A_ , A_ , A_ , A_ ) -> Union[str, Any]:
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.eval()
with torch.no_grad():
lowerCAmelCase = model(**self._prepare_for_class(A_ , A_ ) )
lowerCAmelCase = outputs.hidden_states
lowerCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(A_ ) , A_ )
# Swin has a different seq_length
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(A_ , A_ , A_ , A_ )
def __snake_case ( self ) -> Any:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = 3
lowerCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase = True
self.check_hidden_states_output(A_ , A_ , A_ , (padded_height, padded_width) )
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def __snake_case ( self ) -> List[str]:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __snake_case ( self ) -> Any:
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def __snake_case ( self ) -> List[str]:
pass
def __snake_case ( self ) -> List[Any]:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(A_ ):
lowerCAmelCase = 0
return t
def check_equivalence(A_ , A_ , A_ , A_={} ):
with torch.no_grad():
lowerCAmelCase = model(**A_ , return_dict=A_ , **A_ )
lowerCAmelCase = model(**A_ , return_dict=A_ , **A_ ).to_tuple()
def recursive_check(A_ , A_ ):
if isinstance(A_ , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(A_ , A_ ):
recursive_check(A_ , A_ )
elif isinstance(A_ , A_ ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(A_ , A_ )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(A_ ) , set_nan_tensor_to_zero(A_ ) , atol=1e-5 ) , msg=(
"""Tuple and dict output are not equal. Difference:"""
f' {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:'
f' {torch.isnan(A_ ).any()} and `inf`: {torch.isinf(A_ )}. Dict has'
f' `nan`: {torch.isnan(A_ ).any()} and `inf`: {torch.isinf(A_ )}.'
) , )
recursive_check(A_ , A_ )
for model_class in self.all_model_classes:
lowerCAmelCase = model_class(A_ )
model.to(A_ )
model.eval()
lowerCAmelCase = self._prepare_for_class(A_ , A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ )
check_equivalence(A_ , A_ , A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
check_equivalence(A_ , A_ , A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ )
check_equivalence(A_ , A_ , A_ , {"""output_hidden_states""": True} )
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
lowerCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
check_equivalence(A_ , A_ , A_ , {"""output_hidden_states""": True} )
@require_torch
class __snake_case( unittest.TestCase , _lowerCAmelCase ):
'''simple docstring'''
UpperCAmelCase : int = (MaskFormerSwinBackbone,) if is_torch_available() else ()
UpperCAmelCase : str = MaskFormerSwinConfig
def __snake_case ( self ) -> Dict:
lowerCAmelCase = MaskFormerSwinModelTester(self )
def __snake_case ( self ) -> Any:
lowerCAmelCase, lowerCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
lowerCAmelCase = backbone_class(A_ )
backbone.to(A_ )
backbone.eval()
lowerCAmelCase = backbone(**A_ )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , A_ )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
lowerCAmelCase = backbone(**A_ , output_hidden_states=A_ )
self.assertIsNotNone(outputs.hidden_states )
self.assertTrue(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
lowerCAmelCase, lowerCAmelCase, lowerCAmelCase = hidden_state.shape
self.assertTrue((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
lowerCAmelCase = backbone(**A_ , output_attentions=A_ )
self.assertIsNotNone(outputs.attentions ) | 187 |
'''simple docstring'''
class OverFlowError( Exception ):
'''simple docstring'''
pass
class UnderFlowError( Exception ):
'''simple docstring'''
pass
class FixedPriorityQueue:
'''simple docstring'''
def __init__( self ) -> None:
self.queues = [
[],
[],
[],
]
def enqueue( self , priority , data ) -> None:
try:
if len(self.queues[priority] ) >= 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queues[priority].append(data )
except IndexError:
raise ValueError("""Valid priorities are 0, 1, and 2""" )
def dequeue( self ) -> int:
for queue in self.queues:
if queue:
return queue.pop(0 )
raise UnderFlowError("""All queues are empty""" )
def __str__( self ) -> str:
return "\n".join(f'Priority {i}: {q}' for i, q in enumerate(self.queues ) )
class ElementPriorityQueue:
'''simple docstring'''
def __init__( self ) -> None:
self.queue = []
def enqueue( self , data ) -> None:
if len(self.queue ) == 100:
raise OverFlowError("""Maximum queue size is 100""" )
self.queue.append(data )
def dequeue( self ) -> int:
if not self.queue:
raise UnderFlowError("""The queue is empty""" )
else:
data = min(self.queue )
self.queue.remove(data )
return data
def __str__( self ) -> str:
return str(self.queue )
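# Note: dequeue above is O(n) per call because of min() + remove(); the standard
# library's heapq gives O(log n) pushes and pops. A sketch, not a drop-in
# replacement:
#
#   import heapq
#   heap = []
#   heapq.heappush(heap, 70)
#   heapq.heappush(heap, 10)
#   heapq.heappop(heap)  # -> 10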
def fixed_priority_queue( ) -> None:
"""simple docstring"""
fpq = FixedPriorityQueue()
fpq.enqueue(0 , 10 )
fpq.enqueue(1 , 70 )
fpq.enqueue(0 , 100 )
fpq.enqueue(2 , 1 )
fpq.enqueue(2 , 5 )
fpq.enqueue(1 , 7 )
fpq.enqueue(2 , 4 )
fpq.enqueue(1 , 64 )
fpq.enqueue(0 , 128 )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
print(fpq.dequeue() )
def element_priority_queue( ) -> None:
"""simple docstring"""
epq = ElementPriorityQueue()
epq.enqueue(10 )
epq.enqueue(70 )
epq.enqueue(100 )
epq.enqueue(1 )
epq.enqueue(5 )
epq.enqueue(7 )
epq.enqueue(4 )
epq.enqueue(64 )
epq.enqueue(128 )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
print(epq.dequeue() )
if __name__ == "__main__":
fixed_priority_queue()
element_priority_queue() | 187 | 1 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = torch.device("""cpu""")
def __lowerCAmelCase ():
__lowerCAmelCase : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__lowerCAmelCase : Dict = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw )
return im
def __lowerCAmelCase (_UpperCamelCase ):
if swiftformer_name == "swiftformer_xs":
return torch.tensor([-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0, 8.8_6_8_5e-0_1, 2.4_3_6_0e-0_1] )
elif swiftformer_name == "swiftformer_s":
return torch.tensor([3.9_6_3_6e-0_1, 2.3_4_7_8e-0_1, -1.6_9_6_3e0_0, -1.7_3_8_1e0_0, -8.6_3_3_7e-0_1] )
elif swiftformer_name == "swiftformer_l1":
return torch.tensor([-4.2_7_6_8e-0_1, -4.7_4_2_9e-0_1, -1.0_8_9_7e0_0, -1.0_2_4_8e0_0, 3.5_5_2_3e-0_2] )
elif swiftformer_name == "swiftformer_l3":
return torch.tensor([-2.5_3_3_0e-0_1, 2.4_2_1_1e-0_1, -6.0_1_8_5e-0_1, -8.2_7_8_9e-0_1, -6.0_4_4_6e-0_2] )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = dct.pop(_UpperCamelCase )
__lowerCAmelCase : Dict = val
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = []
for k in state_dict.keys():
__lowerCAmelCase : str = k
if ".pwconv" in k:
__lowerCAmelCase : List[str] = k_new.replace('.pwconv' , '.point_wise_conv' )
if ".dwconv" in k:
__lowerCAmelCase : Tuple = k_new.replace('.dwconv' , '.depth_wise_conv' )
if ".Proj." in k:
__lowerCAmelCase : Optional[int] = k_new.replace('.Proj.' , '.proj.' )
if "patch_embed" in k_new:
__lowerCAmelCase : List[Any] = k_new.replace('patch_embed' , 'swiftformer.patch_embed.patch_embedding' )
if "network" in k_new:
__lowerCAmelCase : Optional[Any] = k_new.split('.' )
if ls[2].isdigit():
__lowerCAmelCase : Union[str, Any] = 'swiftformer.encoder.network.' + ls[1] + '.blocks.' + ls[2] + '.' + '.'.join(ls[3:] )
else:
__lowerCAmelCase : int = k_new.replace('network' , 'swiftformer.encoder.network' )
rename_keys.append((k, k_new) )
return rename_keys
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
__lowerCAmelCase : Tuple = 1000
__lowerCAmelCase : Tuple = 'huggingface/label-files'
__lowerCAmelCase : Any = 'imagenet-1k-id2label.json'
__lowerCAmelCase : Dict = json.load(open(hf_hub_download(_UpperCamelCase , _UpperCamelCase , repo_type='dataset' ) , 'r' ) )
__lowerCAmelCase : Optional[Any] = {int(_UpperCamelCase ): v for k, v in idalabel.items()}
__lowerCAmelCase : Optional[Any] = idalabel
__lowerCAmelCase : Any = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
__lowerCAmelCase : Optional[int] = [3, 3, 6, 4]
__lowerCAmelCase : Union[str, Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
__lowerCAmelCase : str = [3, 3, 9, 6]
__lowerCAmelCase : Tuple = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
__lowerCAmelCase : int = [4, 3, 10, 5]
__lowerCAmelCase : List[Any] = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
__lowerCAmelCase : Union[str, Any] = [4, 4, 12, 6]
__lowerCAmelCase : List[str] = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith('https' ):
__lowerCAmelCase : List[Any] = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' , check_hash=_UpperCamelCase )
else:
__lowerCAmelCase : Optional[int] = torch.load(_UpperCamelCase , map_location='cpu' )
__lowerCAmelCase : List[Any] = checkpoint
__lowerCAmelCase : Optional[int] = create_rename_keys(_UpperCamelCase )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# load HuggingFace model
__lowerCAmelCase : Optional[Any] = SwiftFormerForImageClassification(_UpperCamelCase ).eval()
hf_model.load_state_dict(_UpperCamelCase )
# prepare test inputs
__lowerCAmelCase : Optional[Any] = prepare_img()
__lowerCAmelCase : Optional[Any] = ViTImageProcessor.from_pretrained('preprocessor_config' )
__lowerCAmelCase : Dict = processor(images=_UpperCamelCase , return_tensors='pt' )
# compare outputs from both models
__lowerCAmelCase : int = get_expected_output(_UpperCamelCase )
__lowerCAmelCase : Tuple = hf_model(inputs['pixel_values'] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , _UpperCamelCase , atol=1e-3 )
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F"Saving model {swiftformer_name} to {pytorch_dump_folder_path}" )
hf_model.save_pretrained(_UpperCamelCase )
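# Example invocation (the script name and paths are placeholders, not taken from
# this file):
#
#   python convert_swiftformer_original_to_hf.py \
#       --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ \
#       --original_ckpt ./swiftformer_xs.pth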
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
lowerCamelCase__ = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt) | 86 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
UpperCAmelCase__ = pytest.mark.integration
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
a = Dataset.from_dict({'''filename''': ['''my_name-train''' + '''_''' + str(__UpperCAmelCase ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : Tuple ) ->Any:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
a = dset.map(
lambda __UpperCAmelCase , __UpperCAmelCase : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=__UpperCAmelCase , keep_in_memory=__UpperCAmelCase )
a = dset.add_faiss_index('''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
dset.drop_index('''vecs''' )
def __lowerCAmelCase ( self : Union[str, Any] ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
a , a = dset.get_nearest_examples('''vecs''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Any ) ->Union[str, Any]:
"""simple docstring"""
import faiss
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
dset.save_faiss_index('''vecs''' , tmp_file.name )
dset.load_faiss_index('''vecs2''' , tmp_file.name )
os.unlink(tmp_file.name )
a , a = dset.get_nearest_examples('''vecs2''' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
def __lowerCAmelCase ( self : Optional[Any] ) ->Any:
"""simple docstring"""
a = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='''vecs''' )
dset.drop_index('''vecs''' )
self.assertRaises(__UpperCAmelCase , partial(dset.get_nearest_examples , '''vecs2''' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) ->List[str]:
"""simple docstring"""
from elasticsearch import Elasticsearch
a = self._create_dummy_dataset()
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = {'''acknowledged''': True}
mocked_bulk.return_value([(True, None)] * 30 )
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 29}]}}
a = Elasticsearch()
dset.add_elasticsearch_index('''filename''' , es_client=__UpperCAmelCase )
a , a = dset.get_nearest_examples('''filename''' , '''my_name-train_29''' )
self.assertEqual(examples['''filename'''][0] , '''my_name-train_29''' )
@require_faiss
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Any ) ->Any:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
a = np.eye(5 , dtype=np.floataa )[::-1]
a , a = index.search_batch(__UpperCAmelCase )
self.assertRaises(__UpperCAmelCase , index.search_batch , queries[0] )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , __UpperCAmelCase )
def __lowerCAmelCase ( self : Any ) ->List[Any]:
"""simple docstring"""
import faiss
a = FaissIndex(string_factory='''Flat''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
a = FaissIndex(string_factory='''LSH''' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(__UpperCAmelCase ):
a = FaissIndex(string_factory='''Flat''' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : int ) ->Optional[Any]:
"""simple docstring"""
import faiss
a = faiss.IndexFlat(5 )
a = FaissIndex(custom_index=__UpperCAmelCase )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : int ) ->Dict:
"""simple docstring"""
import faiss
a = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=__UpperCAmelCase ) as tmp_file:
index.save(tmp_file.name )
a = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
a = np.zeros(5 , dtype=np.floataa )
a = 1
a , a = index.search(__UpperCAmelCase )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
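# Why indices[0] == 1 throughout these FAISS tests: with METRIC_INNER_PRODUCT and
# one-hot vectors, the query e_1 scores 1.0 against the stored e_1 row and 0.0
# against every other row, so the nearest neighbour is always index 1.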
@require_faiss
def _a ( mockfs ) -> Any:
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = '''index.faiss'''
    path = F"""mock://{index_name}"""
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores , indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class lowercase_ ( lowercase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) ->List[Any]:
"""simple docstring"""
from elasticsearch import Elasticsearch
with patch('''elasticsearch.Elasticsearch.search''' ) as mocked_search, patch(
'''elasticsearch.client.IndicesClient.create''' ) as mocked_index_create, patch('''elasticsearch.helpers.streaming_bulk''' ) as mocked_bulk:
a = Elasticsearch()
a = {'''acknowledged''': True}
a = ElasticSearchIndex(es_client=__UpperCAmelCase )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['''foo''', '''bar''', '''foobar'''] )
# single query
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
a = '''foo'''
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 0}]}}
a , a = index.search(__UpperCAmelCase , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
# batched queries with timeout
a = ['''foo''', '''bar''', '''foobar''']
a = {'''hits''': {'''hits''': [{'''_score''': 1, '''_id''': 1}]}}
a , a = index.search_batch(__UpperCAmelCase , request_timeout=30 )
a = [scores[0] for scores in total_scores]
a = [indices[0] for indices in total_indices]
self.assertGreater(np.min(__UpperCAmelCase ) , 0 )
self.assertListEqual([1, 1, 1] , __UpperCAmelCase )
| 0 | 0 |
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class lowerCamelCase__ ( __lowerCamelCase):
'''simple docstring'''
snake_case_ =""""""
snake_case_ ="""hf-legacy""" # "hf://"" is reserved for hffs
def __init__(self ,__lowerCamelCase = None ,__lowerCamelCase = None ,**__lowerCamelCase ,) -> Tuple:
"""simple docstring"""
super().__init__(self ,**__lowercase )
lowerCAmelCase__ : int = repo_info
lowerCAmelCase__ : Tuple = token
lowerCAmelCase__ : int = None
def lowerCAmelCase__ (self ) -> Tuple:
"""simple docstring"""
if self.dir_cache is None:
lowerCAmelCase__ : int = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
lowerCAmelCase__ : str = {
'''name''': hf_file.rfilename,
'''size''': None,
'''type''': '''file''',
}
self.dir_cache.update(
{
str(__lowercase ): {'''name''': str(__lowercase ), '''size''': None, '''type''': '''directory'''}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase = "rb" ,**__lowerCamelCase ,) -> List[str]:
"""simple docstring"""
if not isinstance(self.repo_info ,__lowercase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
lowerCAmelCase__ : int = hf_hub_url(self.repo_info.id ,__lowercase ,revision=self.repo_info.sha )
return fsspec.open(
__lowercase ,mode=__lowercase ,headers=get_authentication_headers_for_url(__lowercase ,use_auth_token=self.token ) ,client_kwargs={'''trust_env''': True} ,).open()
def lowerCAmelCase__ (self ,__lowerCamelCase ,**__lowerCamelCase ) -> Dict:
"""simple docstring"""
self._get_dirs()
lowerCAmelCase__ : Tuple = self._strip_protocol(__lowercase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__lowercase )
def lowerCAmelCase__ (self ,__lowerCamelCase ,__lowerCamelCase=False ,**__lowerCamelCase ) -> Optional[Any]:
"""simple docstring"""
self._get_dirs()
lowerCAmelCase__ : Optional[int] = PurePosixPath(path.strip('''/''' ) )
lowerCAmelCase__ : Optional[int] = {}
for p, f in self.dir_cache.items():
lowerCAmelCase__ : Optional[int] = PurePosixPath(p.strip('''/''' ) )
lowerCAmelCase__ : List[str] = p.parent
if root == path:
lowerCAmelCase__ : Dict = f
lowerCAmelCase__ : Union[str, Any] = list(paths.values() )
if detail:
return out
else:
return sorted(f['''name'''] for f in out )
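# Minimal usage sketch (the repo value is an assumption; `lowerCamelCase__` is the
# filesystem class defined above):
#
#   from huggingface_hub import HfApi
#   fs = lowerCamelCase__(repo_info=HfApi().dataset_info("user/dataset"))
#   fs.ls("")                       # top-level files of the repo
#   with fs.open("data/train.csv") as f:
#       first_line = f.readline()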
| 350 |
def euclidean_gcd( a : int , b : int ) -> int:
'''simple docstring'''
while b:
a , b = b , a % b
return a
def euclidean_gcd_recursive( a : int , b : int ) -> int:
'''simple docstring'''
return a if b == 0 else euclidean_gcd_recursive(b , a % b )
def main( ):
'''simple docstring'''
print(f"""euclidean_gcd(3, 5) = {euclidean_gcd(3 ,5)}""")
print(f"""euclidean_gcd(5, 3) = {euclidean_gcd(5 ,3)}""")
print(f"""euclidean_gcd(1, 3) = {euclidean_gcd(1 ,3)}""")
print(f"""euclidean_gcd(3, 6) = {euclidean_gcd(3 ,6)}""")
print(f"""euclidean_gcd(6, 3) = {euclidean_gcd(6 ,3)}""")
print(f"""euclidean_gcd_recursive(3, 5) = {euclidean_gcd_recursive(3 ,5)}""")
print(f"""euclidean_gcd_recursive(5, 3) = {euclidean_gcd_recursive(5 ,3)}""")
print(f"""euclidean_gcd_recursive(1, 3) = {euclidean_gcd_recursive(1 ,3)}""")
print(f"""euclidean_gcd_recursive(3, 6) = {euclidean_gcd_recursive(3 ,6)}""")
print(f"""euclidean_gcd_recursive(6, 3) = {euclidean_gcd_recursive(6 ,3)}""")
if __name__ == "__main__":
main()
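# Sanity check: both implementations agree with math.gcd, e.g.
# euclidean_gcd(48, 18) == euclidean_gcd_recursive(48, 18) == math.gcd(48, 18) == 6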
| 94 | 0 |
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def A ( *args , take_from = None , standard_warn = True , stacklevel = 2 ) -> Any:
'''simple docstring'''
from .. import __version__
deprecated_kwargs = take_from
values = ()
if not isinstance(args[0] , tuple ):
args = (args,)
for attribute, version_name, message in args:
if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
raise ValueError(
F"The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers'"
F" version {__version__} is >= {version_name}" )
warning = None
if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
values += (deprecated_kwargs.pop(attribute ),)
warning = F"The `{attribute}` argument is deprecated and will be removed in version {version_name}."
elif hasattr(deprecated_kwargs , attribute ):
values += (getattr(deprecated_kwargs , attribute ),)
warning = F"The `{attribute}` attribute is deprecated and will be removed in version {version_name}."
elif deprecated_kwargs is None:
warning = F"`{attribute}` is deprecated and will be removed in version {version_name}."
if warning is not None:
warning = warning + ''' ''' if standard_warn else ''''''
warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
call_frame = inspect.getouterframes(inspect.currentframe() )[1]
filename = call_frame.filename
line_number = call_frame.lineno
function = call_frame.function
key, value = next(iter(deprecated_kwargs.items() ) )
raise TypeError(F"{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`" )
if len(values ) == 0:
return
elif len(values ) == 1:
return values[0]
return values
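# Illustrative call pattern (this mirrors how diffusers uses its `deprecate`
# helper, which the function above corresponds to; the kwarg name is made up):
#
#   scale = A("scale", "1.0.0", "Pass `scale` via cross_attention_kwargs.", take_from=kwargs)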
| 339 |
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeqaSeqConfigWithPast
from ...utils import logging
UpperCamelCase = logging.get_logger(__name__)
UpperCamelCase = {
"""t5-small""": """https://huggingface.co/t5-small/resolve/main/config.json""",
"""t5-base""": """https://huggingface.co/t5-base/resolve/main/config.json""",
"""t5-large""": """https://huggingface.co/t5-large/resolve/main/config.json""",
"""t5-3b""": """https://huggingface.co/t5-3b/resolve/main/config.json""",
"""t5-11b""": """https://huggingface.co/t5-11b/resolve/main/config.json""",
}
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
snake_case = "t5"
snake_case = ["past_key_values"]
snake_case = {"hidden_size": "d_model", "num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}
def __init__( self , _SCREAMING_SNAKE_CASE=3_2128 , _SCREAMING_SNAKE_CASE=512 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2048 , _SCREAMING_SNAKE_CASE=6 , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=128 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=1e-6 , _SCREAMING_SNAKE_CASE=1.0 , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1 , **_SCREAMING_SNAKE_CASE , )->List[Any]:
'''simple docstring'''
A_ : List[Any] = vocab_size
A_ : int = d_model
A_ : Optional[Any] = d_kv
A_ : str = d_ff
A_ : int = num_layers
A_ : Any = (
num_decoder_layers if num_decoder_layers is not None else self.num_layers
) # default = symmetry
A_ : Optional[Any] = num_heads
A_ : Union[str, Any] = relative_attention_num_buckets
A_ : Dict = relative_attention_max_distance
A_ : List[str] = dropout_rate
A_ : Dict = layer_norm_epsilon
A_ : str = initializer_factor
A_ : Dict = feed_forward_proj
A_ : int = use_cache
A_ : Optional[int] = self.feed_forward_proj.split('''-''' )
A_ : Optional[Any] = act_info[-1]
A_ : Optional[Any] = act_info[0] == '''gated'''
if len(_SCREAMING_SNAKE_CASE ) > 1 and act_info[0] != "gated" or len(_SCREAMING_SNAKE_CASE ) > 2:
raise ValueError(
F'''`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer.'''
'''Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. '''
'''\'gated-gelu\' or \'relu\'''' )
# for backwards compatibility
if feed_forward_proj == "gated-gelu":
A_ : Tuple = '''gelu_new'''
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
class _lowerCamelCase ( UpperCamelCase ):
"""simple docstring"""
@property
def _snake_case ( self )->Mapping[str, Mapping[int, str]]:
'''simple docstring'''
A_ : Union[str, Any] = {
'''input_ids''': {0: '''batch''', 1: '''encoder_sequence'''},
'''attention_mask''': {0: '''batch''', 1: '''encoder_sequence'''},
}
if self.use_past:
A_ : List[str] = '''past_encoder_sequence + sequence'''
A_ : Optional[int] = {0: '''batch'''}
A_ : str = {0: '''batch''', 1: '''past_decoder_sequence + sequence'''}
else:
A_ : Optional[int] = {0: '''batch''', 1: '''decoder_sequence'''}
A_ : List[Any] = {0: '''batch''', 1: '''decoder_sequence'''}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='''inputs''' )
return common_inputs
@property
def _snake_case ( self )->int:
'''simple docstring'''
return 13
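# Minimal export sketch (assumes the legacy `transformers.onnx` export API; the
# names below are not defined in this file, and the opset value 13 comes from
# the property above):
#
#   from pathlib import Path
#   from transformers.onnx import export
#   export(tokenizer, model, onnx_config, 13, Path("t5.onnx"))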
| 186 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
lowerCamelCase = logging.get_logger(__name__)
lowerCamelCase = {'''vocab_file''': '''spiece.model'''}
lowerCamelCase = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
}
}
lowerCamelCase = {
'''albert-base-v1''': 512,
'''albert-large-v1''': 512,
'''albert-xlarge-v1''': 512,
'''albert-xxlarge-v1''': 512,
'''albert-base-v2''': 512,
'''albert-large-v2''': 512,
'''albert-xlarge-v2''': 512,
'''albert-xxlarge-v2''': 512,
}
lowerCamelCase = '''▁'''
class _a ( snake_case_):
_a : Dict = VOCAB_FILES_NAMES
_a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_a : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , _SCREAMING_SNAKE_CASE : Any , _SCREAMING_SNAKE_CASE : Union[str, Any]=True , _SCREAMING_SNAKE_CASE : int=True , _SCREAMING_SNAKE_CASE : Optional[int]=False , _SCREAMING_SNAKE_CASE : int="[CLS]" , _SCREAMING_SNAKE_CASE : List[str]="[SEP]" , _SCREAMING_SNAKE_CASE : Optional[int]="<unk>" , _SCREAMING_SNAKE_CASE : str="[SEP]" , _SCREAMING_SNAKE_CASE : List[str]="<pad>" , _SCREAMING_SNAKE_CASE : Dict="[CLS]" , _SCREAMING_SNAKE_CASE : Any="[MASK]" , _SCREAMING_SNAKE_CASE : Optional[Dict[str, Any]] = None , **_SCREAMING_SNAKE_CASE : Optional[Any] , )-> None:
lowerCAmelCase__ : Optional[int] = (
AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE , normalized=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else mask_token
)
lowerCAmelCase__ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , sp_model_kwargs=self.sp_model_kwargs , **_SCREAMING_SNAKE_CASE , )
lowerCAmelCase__ : Optional[int] = do_lower_case
lowerCAmelCase__ : Union[str, Any] = remove_space
lowerCAmelCase__ : Any = keep_accents
lowerCAmelCase__ : Dict = vocab_file
lowerCAmelCase__ : Any = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_SCREAMING_SNAKE_CASE )
@property
def UpperCAmelCase__( self : Tuple )-> Any:
return len(self.sp_model )
def UpperCAmelCase__( self : Optional[int] )-> Tuple:
lowerCAmelCase__ : Dict = {self.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Tuple )-> List[Any]:
lowerCAmelCase__ : Optional[int] = self.__dict__.copy()
lowerCAmelCase__ : List[str] = None
return state
def __setstate__( self : Tuple , _SCREAMING_SNAKE_CASE : Optional[int] )-> Tuple:
lowerCAmelCase__ : Any = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
lowerCAmelCase__ : List[Any] = {}
lowerCAmelCase__ : List[str] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : Dict )-> List[Any]:
if self.remove_space:
lowerCAmelCase__ : Tuple = ' '.join(inputs.strip().split() )
else:
lowerCAmelCase__ : Union[str, Any] = inputs
lowerCAmelCase__ : Tuple = outputs.replace('''``''' , '''"''' ).replace('''\'\'''' , '''"''' )
if not self.keep_accents:
lowerCAmelCase__ : Union[str, Any] = unicodedata.normalize('''NFKD''' , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Union[str, Any] = ''.join([c for c in outputs if not unicodedata.combining(_SCREAMING_SNAKE_CASE )] )
if self.do_lower_case:
lowerCAmelCase__ : int = outputs.lower()
return outputs
def UpperCAmelCase__( self : int , _SCREAMING_SNAKE_CASE : str )-> List[str]:
lowerCAmelCase__ : List[str] = self.preprocess_text(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : List[Any] = self.sp_model.encode(_SCREAMING_SNAKE_CASE , out_type=_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[Any] = []
for piece in pieces:
if len(_SCREAMING_SNAKE_CASE ) > 1 and piece[-1] == str(''',''' ) and piece[-2].isdigit():
lowerCAmelCase__ : str = self.sp_model.EncodeAsPieces(piece[:-1].replace(_SCREAMING_SNAKE_CASE , '''''' ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
lowerCAmelCase__ : Optional[int] = cur_pieces[1:]
else:
lowerCAmelCase__ : Union[str, Any] = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(_SCREAMING_SNAKE_CASE )
else:
new_pieces.append(_SCREAMING_SNAKE_CASE )
return new_pieces
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : List[Any] )-> Tuple:
return self.sp_model.PieceToId(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> List[Any]:
return self.sp_model.IdToPiece(_SCREAMING_SNAKE_CASE )
def UpperCAmelCase__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : List[str] )-> Tuple:
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Dict = ''
lowerCAmelCase__ : List[str] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE ) + token
lowerCAmelCase__ : Dict = True
lowerCAmelCase__ : Any = []
else:
current_sub_tokens.append(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Dict = False
out_string += self.sp_model.decode(_SCREAMING_SNAKE_CASE )
return out_string.strip()
def UpperCAmelCase__( self : List[str] , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
lowerCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def UpperCAmelCase__( self : Tuple , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None , _SCREAMING_SNAKE_CASE : bool = False )-> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1]
def UpperCAmelCase__( self : Dict , _SCREAMING_SNAKE_CASE : List[int] , _SCREAMING_SNAKE_CASE : Optional[List[int]] = None )-> List[int]:
lowerCAmelCase__ : int = [self.sep_token_id]
lowerCAmelCase__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def UpperCAmelCase__( self : Any , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[str] = None )-> Tuple[str]:
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
lowerCAmelCase__ : Optional[int] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '''-''' if filename_prefix else '''''') + VOCAB_FILES_NAMES['''vocab_file'''] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
elif not os.path.isfile(self.vocab_file ):
with open(_SCREAMING_SNAKE_CASE , '''wb''' ) as fi:
lowerCAmelCase__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_SCREAMING_SNAKE_CASE )
return (out_vocab_file,)
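# Illustrative round trip (assumes this mirrors transformers' AlbertTokenizer;
# the checkpoint name comes from the pretrained map above):
#
#   tok = AlbertTokenizer.from_pretrained("albert-base-v2")
#   ids = tok.encode("Hello world")                    # [CLS] ... [SEP] added by the methods above
#   text = tok.decode(ids, skip_special_tokens=True)   # -> "hello world" (do_lower_case=True)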
| 356 |
class _a :
def __init__( self : str , _SCREAMING_SNAKE_CASE : str , _SCREAMING_SNAKE_CASE : Optional[int] , _SCREAMING_SNAKE_CASE : List[Any] )-> Tuple:
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : Union[str, Any] = graph
self._normalize_graph(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Tuple = len(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Optional[int] = None
def UpperCAmelCase__( self : Union[str, Any] , _SCREAMING_SNAKE_CASE : Optional[Any] , _SCREAMING_SNAKE_CASE : Union[str, Any] )-> Union[str, Any]:
if isinstance(sources , int ):
sources = [sources]
if isinstance(sinks , int ):
sinks = [sinks]
if len(_SCREAMING_SNAKE_CASE ) == 0 or len(_SCREAMING_SNAKE_CASE ) == 0:
return
lowerCAmelCase__ : Union[str, Any] = sources[0]
lowerCAmelCase__ : Dict = sinks[0]
# make a fake vertex if there is more
# than one source or sink
if len(_SCREAMING_SNAKE_CASE ) > 1 or len(_SCREAMING_SNAKE_CASE ) > 1:
lowerCAmelCase__ : List[Any] = 0
for i in sources:
max_input_flow += sum(self.graph[i] )
lowerCAmelCase__ : Any = len(self.graph ) + 1
for room in self.graph:
room.insert(0 , 0 )
self.graph.insert(0 , [0] * size )
for i in sources:
lowerCAmelCase__ : Optional[Any] = max_input_flow
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : List[Any] = len(self.graph ) + 1
for room in self.graph:
room.append(0 )
self.graph.append([0] * size )
for i in sinks:
lowerCAmelCase__ : str = max_input_flow
lowerCAmelCase__ : Tuple = size - 1
def UpperCAmelCase__( self : Union[str, Any] )-> Union[str, Any]:
if self.maximum_flow_algorithm is None:
raise Exception('''You need to set maximum flow algorithm before.''' )
if self.source_index is None or self.sink_index is None:
return 0
self.maximum_flow_algorithm.execute()
return self.maximum_flow_algorithm.getMaximumFlow()
def UpperCAmelCase__( self : str , _SCREAMING_SNAKE_CASE : List[Any] )-> int:
lowerCAmelCase__ : int = algorithm(self )
class _a :
def __init__( self : Optional[Any] , _SCREAMING_SNAKE_CASE : Tuple )-> Union[str, Any]:
lowerCAmelCase__ : Tuple = flow_network
lowerCAmelCase__ : Dict = flow_network.verticesCount
lowerCAmelCase__ : Optional[Any] = flow_network.sourceIndex
lowerCAmelCase__ : Optional[Any] = flow_network.sinkIndex
# it's just a reference, so you shouldn't change
# it in your algorithms; use a deep copy before doing that
lowerCAmelCase__ : str = flow_network.graph
lowerCAmelCase__ : Optional[int] = False
def UpperCAmelCase__( self : List[str] )-> Dict:
if not self.executed:
self._algorithm()
lowerCAmelCase__ : Any = True
def UpperCAmelCase__( self : Optional[Any] )-> int:
pass
class _a ( _lowercase):
def __init__( self : Any , _SCREAMING_SNAKE_CASE : List[Any] )-> Union[str, Any]:
super().__init__(_SCREAMING_SNAKE_CASE )
# use this to save your result
lowerCAmelCase__ : Dict = -1
def UpperCAmelCase__( self : Any )-> Optional[Any]:
if not self.executed:
raise Exception('''You should execute algorithm before using its result!''' )
return self.maximum_flow
class _a ( _lowercase):
def __init__( self : Any , _SCREAMING_SNAKE_CASE : List[str] )-> List[str]:
super().__init__(_SCREAMING_SNAKE_CASE )
lowerCAmelCase__ : Any = [[0] * self.verticies_count for i in range(self.verticies_count )]
lowerCAmelCase__ : Optional[Any] = [0] * self.verticies_count
lowerCAmelCase__ : str = [0] * self.verticies_count
    def _algorithm(self):
        self.heights[self.source_index] = self.verticies_count
        # push some substance to graph
        for nextvertex_index, bandwidth in enumerate(self.graph[self.source_index]):
            self.preflow[self.source_index][nextvertex_index] += bandwidth
            self.preflow[nextvertex_index][self.source_index] -= bandwidth
            self.excesses[nextvertex_index] += bandwidth
        # Relabel-to-front selection rule
        vertices_list = [
            i
            for i in range(self.verticies_count)
            if i != self.source_index and i != self.sink_index
        ]
        # move through list
        i = 0
        while i < len(vertices_list):
            vertex_index = vertices_list[i]
            previous_height = self.heights[vertex_index]
            self.process_vertex(vertex_index)
            if self.heights[vertex_index] > previous_height:
                # if it was relabeled, swap elements
                # and start from 0 index
                vertices_list.insert(0, vertices_list.pop(i))
                i = 0
            else:
                i += 1
        self.maximum_flow = sum(self.preflow[self.source_index])

    def process_vertex(self, vertex_index):
        while self.excesses[vertex_index] > 0:
            for neighbour_index in range(self.verticies_count):
                # if it's a neighbour and the current vertex is higher
                if (
                    self.graph[vertex_index][neighbour_index]
                    - self.preflow[vertex_index][neighbour_index]
                    > 0
                    and self.heights[vertex_index] > self.heights[neighbour_index]
                ):
                    self.push(vertex_index, neighbour_index)
            self.relabel(vertex_index)

    def push(self, from_index, to_index):
        preflow_delta = min(
            self.excesses[from_index],
            self.graph[from_index][to_index] - self.preflow[from_index][to_index],
        )
        self.preflow[from_index][to_index] += preflow_delta
        self.preflow[to_index][from_index] -= preflow_delta
        self.excesses[from_index] -= preflow_delta
        self.excesses[to_index] += preflow_delta

    def relabel(self, vertex_index):
        min_height = None
        for to_index in range(self.verticies_count):
            if (
                self.graph[vertex_index][to_index]
                - self.preflow[vertex_index][to_index]
                > 0
            ) and (min_height is None or self.heights[to_index] < min_height):
                min_height = self.heights[to_index]
        if min_height is not None:
            self.heights[vertex_index] = min_height + 1
if __name__ == "__main__":
    entrances = [0]
    exits = [3]
    # graph = [
    #     [0, 0, 4, 6, 0, 0],
    #     [0, 0, 5, 2, 0, 0],
    #     [0, 0, 0, 0, 4, 4],
    #     [0, 0, 0, 0, 6, 6],
    #     [0, 0, 0, 0, 0, 0],
    #     [0, 0, 0, 0, 0, 0],
    # ]
    graph = [[0, 7, 0, 0], [0, 0, 6, 0], [0, 0, 0, 8], [9, 0, 0, 0]]
    # prepare our network
    flow_network = FlowNetwork(graph, entrances, exits)
    # set algorithm
    flow_network.set_maximum_flow_algorithm(PushRelabelExecutor)
    # and calculate
    maximum_flow = flow_network.find_maximum_flow()
    print(f"maximum flow is {maximum_flow}")
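    # Optional cross-check (an illustrative addition; assumes networkx is
    # installed, which the original script does not require). networkx's
    # default max-flow solver is in the same preflow-push family, so it
    # should agree with the hand-rolled push-relabel result above.
    try:
        import networkx as nx

        g = nx.DiGraph()
        for u, row in enumerate(graph):
            for v, capacity in enumerate(row):
                if capacity:
                    g.add_edge(u, v, capacity=capacity)
        assert nx.maximum_flow_value(g, entrances[0], exits[0]) == maximum_flow
    except ImportError:
        pass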
| 211 | 0 |
"""Counting sort: a stable, linear-time sort for bounded integer keys."""


def counting_sort(collection):
    if collection == []:
        return []
    # get some information about the collection
    coll_len = len(collection)
    coll_max = max(collection)
    coll_min = min(collection)
    # create the counting array
    counting_arr_length = coll_max + 1 - coll_min
    counting_arr = [0] * counting_arr_length
    # count how many times each number appears in the collection
    for number in collection:
        counting_arr[number - coll_min] += 1
    # sum each position with its predecessors. now, counting_arr[i] tells
    # us how many elements <= i there are in the collection
    for i in range(1, counting_arr_length):
        counting_arr[i] = counting_arr[i] + counting_arr[i - 1]
    # create the output collection
    ordered = [0] * coll_len
    # place the elements in the output, respecting the original order (stable
    # sort) from end to begin, updating counting_arr
    for i in reversed(range(0, coll_len)):
        ordered[counting_arr[collection[i] - coll_min] - 1] = collection[i]
        counting_arr[collection[i] - coll_min] -= 1
    return ordered


def counting_sort_string(string):
    return "".join([chr(i) for i in counting_sort([ord(c) for c in string])])
if __name__ == "__main__":
# Test string sort
assert counting_sort_string("thisisthestring") == "eghhiiinrsssttt"
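    # Extra sanity checks (illustrative additions, not in the original script):
    # counting sort is stable and handles negatives via the coll_min offset.
    assert counting_sort([0, 5, 3, 2, 2]) == [0, 2, 2, 3, 5]
    assert counting_sort([-2, -5, -45]) == [-45, -5, -2]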
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(counting_sort(unsorted))
| 168 |
'''simple docstring'''
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()


DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
B"\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, "sentencepiece_model_pb2", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b"H\003"
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
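
# Typical use of the generated bindings (an illustrative sketch; the path
# "tokenizer.model" is a placeholder, not part of this module):
# m = ModelProto()
# m.ParseFromString(open("tokenizer.model", "rb").read())
# print(m.trainer_spec.vocab_size, len(m.pieces))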
| 168 | 1 |
import inspect
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class LDMPipeline(DiffusionPipeline):
    """Unconditional image generation with latent diffusion (VQ-VAE + UNet + DDIM)."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler) -> None:
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        self.scheduler.set_timesteps(num_inference_steps)
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta
        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
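
# Minimal usage sketch (an illustrative addition; assumes an unconditional LDM
# checkpoint such as "CompVis/ldm-celebahq-256" is available on the Hub):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(batch_size=1, num_inference_steps=50).images[0]
# image.save("ldm_sample.png")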
| 276 |
import math
def main() -> None:
    message = input("Enter message: ")
    key = int(input(f"Enter key [2-{len(message) - 1}]: "))
    mode = input("Encryption/Decryption [e/d]: ")
    if mode.lower().startswith("e"):
        text = encrypt_message(key, message)
    elif mode.lower().startswith("d"):
        text = decrypt_message(key, message)
    # Append pipe symbol (vertical bar) to identify spaces at the end.
    print(f"Output:\n{text + '|'}")


def encrypt_message(key: int, message: str) -> str:
    """Write the message into ``key`` columns by stepping through it, then read column by column."""
    cipher_text = [""] * key
    for col in range(key):
        pointer = col
        while pointer < len(message):
            cipher_text[col] += message[pointer]
            pointer += key
    return "".join(cipher_text)
def decrypt_message(key: int, message: str) -> str:
    """Rebuild the plaintext by filling a num_rows x num_cols grid column-wise."""
    num_cols = math.ceil(len(message) / key)
    num_rows = key
    num_shaded_boxes = (num_cols * num_rows) - len(message)
    plain_text = [""] * num_cols
    col = 0
    row = 0
    for symbol in message:
        plain_text[col] += symbol
        col += 1
        if (
            (col == num_cols)
            or (col == num_cols - 1)
            and (row >= num_rows - num_shaded_boxes)
        ):
            col = 0
            row += 1
    return "".join(plain_text)
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 276 | 1 |
"""simple docstring"""
def a__ ( SCREAMING_SNAKE_CASE : int ):
'''simple docstring'''
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod()
| 108 |
"""simple docstring"""
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
    UNet2DConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
pipe1_model_id = "CompVis/stable-diffusion-v1-1"
pipe2_model_id = "CompVis/stable-diffusion-v1-2"
pipe3_model_id = "CompVis/stable-diffusion-v1-3"
pipe4_model_id = "CompVis/stable-diffusion-v1-4"
class StableDiffusionComparisonPipeline(DiffusionPipeline):
    """Generates the same prompt with Stable Diffusion v1.1 through v1.4 for comparison."""

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()
        self.pipe1 = StableDiffusionPipeline.from_pretrained(pipe1_model_id)
        self.pipe2 = StableDiffusionPipeline.from_pretrained(pipe2_model_id)
        self.pipe3 = StableDiffusionPipeline.from_pretrained(pipe3_model_id)
        self.pipe4 = StableDiffusionPipeline(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            requires_safety_checker=requires_safety_checker,
        )
        self.register_modules(pipeline1=self.pipe1, pipeline2=self.pipe2, pipeline3=self.pipe3, pipeline4=self.pipe4)
    @property
    def layers(self) -> Dict[str, Any]:
        return {k: getattr(self, k) for k in self.config.keys() if not k.startswith("_")}

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        # set slice_size = None to disable attention slicing
        self.enable_attention_slicing(None)
    @torch.no_grad()
    def text2img_sd1_1(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_2(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_3(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)

    @torch.no_grad()
    def text2img_sd1_4(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        return self.pipe4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
    @torch.no_grad()
    def __call__(self, prompt: Union[str, List[str]], height: int = 512, width: int = 512, num_inference_steps: int = 50, guidance_scale: float = 7.5, negative_prompt: Optional[Union[str, List[str]]] = None, num_images_per_prompt: Optional[int] = 1, eta: float = 0.0, generator: Optional[torch.Generator] = None, latents: Optional[torch.FloatTensor] = None, output_type: Optional[str] = "pil", return_dict: bool = True, callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None, callback_steps: int = 1, **kwargs):
        device = "cuda" if torch.cuda.is_available() else "cpu"
        self.to(device)
        # Check whether the height and width are divisible by 8
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` must be divisible by 8 but are {height} and {width}.")
        # Get first result from Stable Diffusion Checkpoint v1.1
        res1 = self.text2img_sd1_1(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.2
        res2 = self.text2img_sd1_2(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.3
        res3 = self.text2img_sd1_3(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get first result from Stable Diffusion Checkpoint v1.4
        res4 = self.text2img_sd1_4(prompt=prompt, height=height, width=width, num_inference_steps=num_inference_steps, guidance_scale=guidance_scale, negative_prompt=negative_prompt, num_images_per_prompt=num_images_per_prompt, eta=eta, generator=generator, latents=latents, output_type=output_type, return_dict=return_dict, callback=callback, callback_steps=callback_steps, **kwargs)
        # Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
        return StableDiffusionPipelineOutput([res1[0], res2[0], res3[0], res4[0]])
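
# Usage sketch (an illustrative addition; diffusers community pipelines are
# typically loaded via the `custom_pipeline` argument, assuming this file is
# registered under the name "stable_diffusion_comparison"):
# pipe = DiffusionPipeline.from_pretrained(pipe4_model_id, custom_pipeline="stable_diffusion_comparison")
# images = pipe(prompt="an astronaut riding a horse").images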
| 183 | 0 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_results(output_dir):
    results = {}
    path = os.path.join(output_dir, "all_results.json")
    if os.path.exists(path):
        with open(path, "r") as f:
            results = json.load(f)
    else:
        raise ValueError(f"can't find {path}")
    return results
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class TorchXLAExamplesTests(TestCasePlus):
    def test_run_glue(self):
        import xla_spawn

        tmp_dir = self.get_auto_remove_tmp_dir()
        testargs = f'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
        with patch.object(sys, "argv", testargs):
            start = time()
            xla_spawn.main()
            end = time()
            result = get_results(tmp_dir)
            self.assertGreaterEqual(result["eval_accuracy"], 0.75)
            # Assert that the script takes less than 500 seconds to make sure it doesn't hang.
            self.assertLess(end - start, 500)
    def test_trainer_tpu(self):
        import xla_spawn

        testargs = """
            ./tests/test_trainer_tpu.py
            --num_cores=8
            ./tests/test_trainer_tpu.py
            """.split()
        with patch.object(sys, "argv", testargs):
            xla_spawn.main()
| 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('pytorch_lightning>=1.0.4')
MODEL_MODES = {
'base': AutoModel,
'sequence-classification': AutoModelForSequenceClassification,
'question-answering': AutoModelForQuestionAnswering,
'pretraining': AutoModelForPreTraining,
'token-classification': AutoModelForTokenClassification,
'language-modeling': AutoModelWithLMHead,
    'summarization': AutoModelForSeq2SeqLM,
    'translation': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'linear': get_linear_schedule_with_warmup,
'cosine': get_cosine_schedule_with_warmup,
'cosine_w_restarts': get_cosine_with_hard_restarts_schedule_with_warmup,
'polynomial': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '{' + ', '.join(arg_to_scheduler_choices) + '}'
class BaseTransformer(pl.LightningModule):
    def __init__(
        self,
        hparams: argparse.Namespace,
        num_labels=None,
        mode="base",
        config=None,
        tokenizer=None,
        model=None,
        **config_kwargs,
    ):
        """Initialize a model, tokenizer and config."""
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams)
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir)
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path,
                **({"num_labels": num_labels} if num_labels is not None else {}),
                cache_dir=cache_dir,
                **config_kwargs,
            )
        else:
            self.config: PretrainedConfig = config
        extra_model_params = ("encoder_layerdrop", "decoder_layerdrop", "dropout", "attention_dropout")
        for p in extra_model_params:
            if getattr(self.hparams, p, None):
                assert hasattr(self.config, p), f"model config doesn't have a `{p}` attribute"
                setattr(self.config, p, getattr(self.hparams, p))
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path,
                cache_dir=cache_dir,
            )
        else:
            self.tokenizer: PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path,
                from_tf=bool(".ckpt" in self.hparams.model_name_or_path),
                config=self.config,
                cache_dir=cache_dir,
            )
        else:
            self.model = model
    def load_hf_checkpoint(self, *args, **kwargs):
        self.model = self.model_type.from_pretrained(*args, **kwargs)

    def get_lr_scheduler(self):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt, num_warmup_steps=self.hparams.warmup_steps, num_training_steps=self.total_steps()
        )
        scheduler = {"scheduler": scheduler, "interval": "step", "frequency": 1}
        return scheduler
    def configure_optimizers(self):
        """Prepare optimizer and schedule (linear warmup and decay)."""
        model = self.model
        no_decay = ["bias", "LayerNorm.weight"]
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)
                ],  # check these named parameters
                "weight_decay": self.hparams.weight_decay,
            },
            {
                "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
                "weight_decay": 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, scale_parameter=False, relative_step=False
            )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters, lr=self.hparams.learning_rate, eps=self.hparams.adam_epsilon
            )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self, batch, batch_nb):
        return self.validation_step(batch, batch_nb)

    def test_epoch_end(self, outputs):
        return self.validation_end(outputs)

    def total_steps(self) -> int:
        """The number of total training steps that will be run. Used for lr scheduler purposes."""
        num_devices = max(1, self.hparams.gpus)  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
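    # Worked example (illustrative numbers, not from the original file): with
    # dataset_size=1000, train_batch_size=32, accumulate_grad_batches=2 and one
    # GPU, the effective batch size is 64, so 3 epochs give
    # (1000 / 64) * 3 = 46.875 optimizer steps for the scheduler.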
    def setup(self, stage):
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset)
        else:
            self.train_loader = self.get_dataloader("train", self.hparams.train_batch_size, shuffle=True)
            self.dataset_size = len(self.train_dataloader().dataset)
    def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
        raise NotImplementedError("You must implement this for your task")

    def train_dataloader(self):
        return self.train_loader

    def val_dataloader(self):
        return self.get_dataloader("dev", self.hparams.eval_batch_size, shuffle=False)

    def test_dataloader(self):
        return self.get_dataloader("test", self.hparams.eval_batch_size, shuffle=False)
    def _feature_file(self, mode):
        return os.path.join(
            self.hparams.data_dir,
            "cached_{}_{}_{}".format(
                mode,
                list(filter(None, self.hparams.model_name_or_path.split("/"))).pop(),
                str(self.hparams.max_seq_length),
            ),
        )

    @pl.utilities.rank_zero_only
    def on_save_checkpoint(self, checkpoint: Dict[str, Any]) -> None:
        save_path = self.output_dir.joinpath("best_tfmr")
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path)
        self.tokenizer.save_pretrained(save_path)
    @staticmethod
    def add_model_specific_args(parser, root_dir):
        parser.add_argument(
            "--model_name_or_path",
            default=None,
            type=str,
            required=True,
            help="Path to pretrained model or model identifier from huggingface.co/models",
        )
        parser.add_argument("--config_name", default="", type=str, help="Pretrained config name or path if not the same as model_name")
        parser.add_argument("--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name")
        parser.add_argument(
            "--cache_dir",
            default=str(Path(__file__).parent / "test_run" / "cache"),
            type=str,
            help="Where do you want to store the pre-trained models downloaded from huggingface.co",
        )
        parser.add_argument("--encoder_layerdrop", type=float, help="Encoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument("--decoder_layerdrop", type=float, help="Decoder layer dropout probability (Optional). Goes into model.config")
        parser.add_argument("--dropout", type=float, help="Dropout probability (Optional). Goes into model.config")
        parser.add_argument("--attention_dropout", type=float, help="Attention dropout probability (Optional). Goes into model.config")
        parser.add_argument("--learning_rate", default=5e-5, type=float, help="The initial learning rate for Adam.")
        parser.add_argument(
            "--lr_scheduler",
            default="linear",
            choices=arg_to_scheduler_choices,
            metavar=arg_to_scheduler_metavar,
            type=str,
            help="Learning rate scheduler",
        )
        parser.add_argument("--weight_decay", default=0.0, type=float, help="Weight decay if we apply some.")
        parser.add_argument("--adam_epsilon", default=1e-8, type=float, help="Epsilon for Adam optimizer.")
        parser.add_argument("--warmup_steps", default=0, type=int, help="Linear warmup over warmup_steps.")
        parser.add_argument("--num_workers", default=4, type=int, help="kwarg passed to DataLoader")
        parser.add_argument("--num_train_epochs", dest="max_epochs", default=3, type=int)
        parser.add_argument("--train_batch_size", default=32, type=int)
        parser.add_argument("--eval_batch_size", default=32, type=int)
        parser.add_argument("--adafactor", action="store_true")
class InitCallback(pl.Callback):
    def on_sanity_check_start(self, trainer, pl_module):
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.


class CheckParamCallback(pl.Callback):
    # check whether newly added model parameters are differentiable
    def on_after_backward(self, trainer, pl_module):
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name)
class LoggingCallback(pl.Callback):
    def on_batch_end(self, trainer, pl_module):
        lr_scheduler = trainer.lr_schedulers[0]["scheduler"]
        lrs = {f"lr_group_{i}": lr for i, lr in enumerate(lr_scheduler.get_lr())}
        pl_module.logger.log_metrics(lrs)

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Validation results *****")
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics):
            if key not in ["log", "progress_bar"]:
                rank_zero_info("{} = {}\n".format(key, str(metrics[key])))

    def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
        rank_zero_info("***** Test results *****")
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir, "test_results.txt")
        with open(output_test_results_file, "w") as writer:
            for key in sorted(metrics):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info("{} = {}\n".format(key, str(metrics[key])))
                    writer.write("{} = {}\n".format(key, str(metrics[key])))
def add_generic_args(parser, root_dir) -> None:
    parser.add_argument(
        "--output_dir",
        default=str(Path(__file__).parent / "test_run" / "model_checkpoints"),
        type=str,
        help="The output directory where the model predictions and checkpoints will be written.",
    )
    parser.add_argument(
        "--fp16",
        action="store_true",
        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit",
    )
    parser.add_argument(
        "--fp16_opt_level",
        type=str,
        default="O2",
        help=(
            "For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
            "See details at https://nvidia.github.io/apex/amp.html"
        ),
    )
    parser.add_argument("--n_tpu_cores", dest="tpu_cores", type=int)
    parser.add_argument("--max_grad_norm", dest="gradient_clip_val", default=1.0, type=float, help="Max gradient norm")
    parser.add_argument("--do_train", action="store_true", help="Whether to run training.")
    parser.add_argument("--do_predict", action="store_true", help="Whether to run predictions on the test set.")
    parser.add_argument(
        "--gradient_accumulation_steps",
        dest="accumulate_grad_batches",
        type=int,
        default=1,
        help="Number of updates steps to accumulate before performing a backward/update pass.",
    )
    parser.add_argument("--seed", type=int, default=42, help="random seed for initialization")
    parser.add_argument(
        "--data_dir",
        default=str(Path(__file__).parent / "test_run" / "dummy-train-data"),
        type=str,
        help="The input data dir. Should contain the training files for the CoNLL-2003 NER task.",
    )
def generic_train(
    model,
    args,
    early_stopping_callback=None,
    logger=True,  # can pass WandbLogger() here
    extra_callbacks=[],
    checkpoint_callback=None,
    logging_callback=None,
    **extra_train_kwargs,
):
    pl.seed_everything(args.seed)
    # init model
    odir = Path(model.hparams.output_dir)
    odir.mkdir(exist_ok=True)
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir, prefix="checkpoint", monitor="val_loss", mode="min", save_top_k=1
        )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback)
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params["precision"] = 16
    if args.gpus > 1:
        train_params["accelerator"] = "auto"
        train_params["strategy"] = "ddp"
    train_params["accumulate_grad_batches"] = args.accumulate_grad_batches
    train_params["profiler"] = None
    train_params["devices"] = "auto"
    trainer = pl.Trainer.from_argparse_args(
        args,
        weights_summary=None,
        callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback],
        logger=logger,
        val_check_interval=1,
        num_sanity_val_steps=2,
        **train_params,
    )
    if args.do_train:
        trainer.fit(model)
    else:
        print("RAG modeling tests with new set functions successfully executed!")
    return trainer
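

# Wiring sketch (an illustrative addition): a task-specific subclass only has
# to provide `get_dataloader`; everything else comes from BaseTransformer.
# parser = argparse.ArgumentParser()
# add_generic_args(parser, os.getcwd())
# BaseTransformer.add_model_specific_args(parser, os.getcwd())
# args = parser.parse_args()
# model = MyTaskTransformer(args)  # hypothetical subclass of BaseTransformer
# trainer = generic_train(model, args)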
| 331 | 1 |
from maths.is_square_free import is_square_free
from maths.prime_factors import prime_factors


def mobius(n: int) -> int:
    """Return mu(n): (-1)**k for square-free n with k prime factors, else 0."""
    factors = prime_factors(n)
    if is_square_free(factors):
        return -1 if len(factors) % 2 else 1
    return 0
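

# Quick illustrative checks (an addition; they assume the `maths` helper
# modules above are importable from the package layout):
# mu(10) = 1 (two prime factors), mu(30) = -1 (three), mu(12) = 0 (4 divides 12).
# assert mobius(10) == 1 and mobius(30) == -1 and mobius(12) == 0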
if __name__ == "__main__":
import doctest
doctest.testmod()
| 323 |
'''simple docstring'''
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import AutoTokenizer, BarkProcessor
from transformers.testing_utils import require_torch, slow
@require_torch
class BarkProcessorTest(unittest.TestCase):
    def setUp(self):
        self.checkpoint = "ylacombe/bark-small"
        self.tmpdirname = tempfile.mkdtemp()
        self.voice_preset = "en_speaker_1"
        self.input_string = "This is a test string"
        self.speaker_embeddings_dict_path = "speaker_embeddings_path.json"
        self.speaker_embeddings_directory = "speaker_embeddings"

    def get_tokenizer(self, **kwargs):
        return AutoTokenizer.from_pretrained(self.checkpoint, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)
    def test_save_load_pretrained_default(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        processor.save_pretrained(self.tmpdirname)
        processor = BarkProcessor.from_pretrained(self.tmpdirname)
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer.get_vocab())
    @slow
    def test_save_load_pretrained_additional_features(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        processor.save_pretrained(
            self.tmpdirname,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
            speaker_embeddings_directory=self.speaker_embeddings_directory,
        )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)", eos_token="(EOS)")
        processor = BarkProcessor.from_pretrained(
            self.tmpdirname,
            self.speaker_embeddings_dict_path,
            bos_token="(BOS)",
            eos_token="(EOS)",
        )
        self.assertEqual(processor.tokenizer.get_vocab(), tokenizer_add_kwargs.get_vocab())
    def test_speaker_embeddings(self):
        processor = BarkProcessor.from_pretrained(
            pretrained_processor_name_or_path=self.checkpoint,
            speaker_embeddings_dict_path=self.speaker_embeddings_dict_path,
        )
        seq_len = 35
        nb_codebooks_coarse = 2
        nb_codebooks_total = 8
        voice_preset = {
            "semantic_prompt": np.ones(seq_len),
            "coarse_prompt": np.ones((nb_codebooks_coarse, seq_len)),
            "fine_prompt": np.ones((nb_codebooks_total, seq_len)),
        }
        # test providing already loaded voice_preset
        inputs = processor(text=self.input_string, voice_preset=voice_preset)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from npz file
        file_path = os.path.join(self.tmpdirname, "file.npz")
        np.savez(file_path, **voice_preset)
        inputs = processor(text=self.input_string, voice_preset=file_path)
        processed_voice_preset = inputs["history_prompt"]
        for key in voice_preset:
            self.assertListEqual(voice_preset[key].tolist(), processed_voice_preset.get(key, np.array([])).tolist())
        # test loading voice preset from the hub
        inputs = processor(text=self.input_string, voice_preset=self.voice_preset)
    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        processor = BarkProcessor(tokenizer=tokenizer)
        encoded_processor = processor(text=self.input_string)
        encoded_tok = tokenizer(
            self.input_string,
            padding="max_length",
            max_length=256,
            add_special_tokens=False,
            return_attention_mask=True,
            return_token_type_ids=False,
        )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key], encoded_processor[key].squeeze().tolist())
| 323 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
UpperCamelCase_ = {
'configuration_layoutlmv3': [
'LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP',
'LayoutLMv3Config',
'LayoutLMv3OnnxConfig',
],
'processing_layoutlmv3': ['LayoutLMv3Processor'],
'tokenization_layoutlmv3': ['LayoutLMv3Tokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['LayoutLMv3TokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'LayoutLMv3ForQuestionAnswering',
'LayoutLMv3ForSequenceClassification',
'LayoutLMv3ForTokenClassification',
'LayoutLMv3Model',
'LayoutLMv3PreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLayoutLMv3ForQuestionAnswering',
'TFLayoutLMv3ForSequenceClassification',
'TFLayoutLMv3ForTokenClassification',
'TFLayoutLMv3Model',
'TFLayoutLMv3PreTrainedModel',
]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ['LayoutLMv3FeatureExtractor']
UpperCamelCase_ = ['LayoutLMv3ImageProcessor']
if TYPE_CHECKING:
    from .configuration_layoutlmv3 import (
        LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP,
        LayoutLMv3Config,
        LayoutLMv3OnnxConfig,
    )
    from .processing_layoutlmv3 import LayoutLMv3Processor
    from .tokenization_layoutlmv3 import LayoutLMv3Tokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .tokenization_layoutlmv3_fast import LayoutLMv3TokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_layoutlmv3 import (
            LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            LayoutLMv3ForQuestionAnswering,
            LayoutLMv3ForSequenceClassification,
            LayoutLMv3ForTokenClassification,
            LayoutLMv3Model,
            LayoutLMv3PreTrainedModel,
        )
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_tf_layoutlmv3 import (
            TF_LAYOUTLMV3_PRETRAINED_MODEL_ARCHIVE_LIST,
            TFLayoutLMv3ForQuestionAnswering,
            TFLayoutLMv3ForSequenceClassification,
            TFLayoutLMv3ForTokenClassification,
            TFLayoutLMv3Model,
            TFLayoutLMv3PreTrainedModel,
        )
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .feature_extraction_layoutlmv3 import LayoutLMv3FeatureExtractor
        from .image_processing_layoutlmv3 import LayoutLMv3ImageProcessor
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
UpperCamelCase_ = {
'configuration_lilt': ['LILT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LiltConfig'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
'LILT_PRETRAINED_MODEL_ARCHIVE_LIST',
'LiltForQuestionAnswering',
'LiltForSequenceClassification',
'LiltForTokenClassification',
'LiltModel',
'LiltPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__) | 303 | 1 |
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def get_tfds(
    train_file: str,
    eval_file: str,
    test_file: str,
    tokenizer: PreTrainedTokenizer,
    label_column_id: int,
    max_seq_length: Optional[int] = None,
):
    files = {}
    if train_file is not None:
        files[datasets.Split.TRAIN] = [train_file]
    if eval_file is not None:
        files[datasets.Split.VALIDATION] = [eval_file]
    if test_file is not None:
        files[datasets.Split.TEST] = [test_file]
    ds = datasets.load_dataset("csv", data_files=files)
    features_name = list(ds[list(files.keys())[0]].features.keys())
    label_name = features_name.pop(label_column_id)
    label_list = list(set(ds[list(files.keys())[0]][label_name]))
    label2id = {label: i for i, label in enumerate(label_list)}
    input_names = tokenizer.model_input_names
    transformed_ds = {}
    if len(features_name) == 1:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    example[features_name[0]], truncation=True, max_length=max_seq_length, padding="max_length"
                ),
                batched=True,
            )
    elif len(features_name) == 2:
        for k in files.keys():
            transformed_ds[k] = ds[k].map(
                lambda example: tokenizer.batch_encode_plus(
                    (example[features_name[0]], example[features_name[1]]),
                    truncation=True,
                    max_length=max_seq_length,
                    padding="max_length",
                ),
                batched=True,
            )
    def gen_train():
        for ex in transformed_ds[datasets.Split.TRAIN]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_val():
        for ex in transformed_ds[datasets.Split.VALIDATION]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)

    def gen_test():
        for ex in transformed_ds[datasets.Split.TEST]:
            d = {k: v for k, v in ex.items() if k in input_names}
            label = label2id[ex[label_name]]
            yield (d, label)
    train_ds = (
        tf.data.Dataset.from_generator(
            gen_train,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TRAIN in transformed_ds
        else None
    )
    if train_ds is not None:
        train_ds = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN])))

    val_ds = (
        tf.data.Dataset.from_generator(
            gen_val,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.VALIDATION in transformed_ds
        else None
    )
    if val_ds is not None:
        val_ds = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION])))

    test_ds = (
        tf.data.Dataset.from_generator(
            gen_test,
            ({k: tf.int32 for k in input_names}, tf.int64),
            ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
        )
        if datasets.Split.TEST in transformed_ds
        else None
    )
    if test_ds is not None:
        test_ds = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST])))

    return train_ds, val_ds, test_ds, label2id
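

# Illustrative call (an addition for clarity; the CSV paths are placeholders):
# train_ds, val_ds, test_ds, label2id = get_tfds(
#     train_file="train.csv",
#     eval_file="dev.csv",
#     test_file=None,
#     tokenizer=AutoTokenizer.from_pretrained("distilbert-base-uncased"),
#     label_column_id=0,
#     max_seq_length=128,
# )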
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    label_column_id: int = field(metadata={"help": "Which column contains the label"})
    train_file: str = field(default=None, metadata={"help": "The path of the training file"})
    dev_file: Optional[str] = field(default=None, metadata={"help": "The path of the development file"})
    test_file: Optional[str] = field(default=None, metadata={"help": "The path of the test file"})
    max_seq_length: int = field(
        default=128,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
@dataclass
class snake_case_ :
'''simple docstring'''
SCREAMING_SNAKE_CASE : str = field(
metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__A , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__A , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
SCREAMING_SNAKE_CASE : bool = field(default=__A , metadata={"help": "Set this flag to use fast tokenization."} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
SCREAMING_SNAKE_CASE : Optional[str] = field(
default=__A , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
def __SCREAMING_SNAKE_CASE ():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
snake_case_, snake_case_, snake_case_ = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
            ''' --overwrite_output_dir to overwrite it.''' )
# Setup logging
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''' , datefmt='''%m/%d/%Y %H:%M:%S''' , level=logging.INFO , )
logger.info(
F'''n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, '''
F'''16-bits training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
snake_case_, snake_case_, snake_case_, snake_case_ = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=SCREAMING_SNAKE_CASE__ , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
snake_case_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(SCREAMING_SNAKE_CASE__ ) , labelaid=SCREAMING_SNAKE_CASE__ , idalabel={id: label for label, id in labelaid.items()} , finetuning_task='''text-classification''' , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
snake_case_ = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool('''.bin''' in model_args.model_name_or_path ) , config=SCREAMING_SNAKE_CASE__ , cache_dir=model_args.cache_dir , )
def compute_metrics(SCREAMING_SNAKE_CASE__ ) -> Dict:
snake_case_ = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
snake_case_ = TFTrainer(
model=SCREAMING_SNAKE_CASE__ , args=SCREAMING_SNAKE_CASE__ , train_dataset=SCREAMING_SNAKE_CASE__ , eval_dataset=SCREAMING_SNAKE_CASE__ , compute_metrics=SCREAMING_SNAKE_CASE__ , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case_ = {}
if training_args.do_eval:
logger.info('''*** Evaluate ***''' )
snake_case_ = trainer.evaluate()
snake_case_ = os.path.join(training_args.output_dir , '''eval_results.txt''' )
with open(SCREAMING_SNAKE_CASE__ , '''w''' ) as writer:
logger.info('''***** Eval results *****''' )
for key, value in result.items():
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
results.update(SCREAMING_SNAKE_CASE__ )
return results
if __name__ == "__main__":
main() | 8 |
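The script above feeds tokenized examples into TF through Python generators. A minimal, self-contained sketch of that from_generator pattern, assuming TensorFlow 2.x; the feature names and dummy data below are illustrative, not taken from the script:
import tensorflow as tf

input_names = ["input_ids", "attention_mask"]

def gen():
    # each example: a dict of variable-length int features plus a scalar label
    for i in range(4):
        yield {k: [i, i + 1, i + 2] for k in input_names}, i % 2

ds = tf.data.Dataset.from_generator(
    gen,
    ({k: tf.int32 for k in input_names}, tf.int64),
    ({k: tf.TensorShape([None]) for k in input_names}, tf.TensorShape([])),
)
for features, label in ds.take(1):
    print(features["input_ids"].numpy(), label.numpy())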
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCAmelCase : Any ={'configuration_vit_msn': ['VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ViTMSNConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST',
'ViTMSNModel',
'ViTMSNForImageClassification',
'ViTMSNPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCAmelCase : Union[str, Any] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 | 0 |
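The init above routes every import through transformers' internal _LazyModule so nothing heavy loads until first attribute access. A hedged standard-library sketch of the same idea; the class, module, and symbol names here are hypothetical, not the transformers implementation:
import importlib
import types

class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # submodule -> exported symbols

    def __getattr__(self, attr):
        # called only for attributes not yet set; triggers the real import lazily
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(attr)

lazy = LazyModule("demo", {"json": ["dumps", "loads"]})
print(lazy.dumps({"ok": True}))  # json is imported only at this point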
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def a__ ( a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 48
__SCREAMING_SNAKE_CASE = """pixelshuffle_aux"""
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = [6, 6, 6, 6]
__SCREAMING_SNAKE_CASE = 60
__SCREAMING_SNAKE_CASE = [6, 6, 6, 6]
__SCREAMING_SNAKE_CASE = """pixelshuffledirect"""
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = """nearest+conv"""
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 1_26
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = 255.0
__SCREAMING_SNAKE_CASE = """"""
return config
def a__ ( a__ , a__ ):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """embeddings.patch_embeddings.projection""" )
if "patch_embed.norm" in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.norm""" , """embeddings.patch_embeddings.layernorm""" )
if "layers" in name:
__SCREAMING_SNAKE_CASE = name.replace("""layers""" , """encoder.stages""" )
if "residual_group.blocks" in name:
__SCREAMING_SNAKE_CASE = name.replace("""residual_group.blocks""" , """layers""" )
if "attn.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn.proj""" , """attention.output.dense""" )
if "attn" in name:
__SCREAMING_SNAKE_CASE = name.replace("""attn""" , """attention.self""" )
if "norm1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm1""" , """layernorm_before""" )
if "norm2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""norm2""" , """layernorm_after""" )
if "mlp.fc1" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc1""" , """intermediate.dense""" )
if "mlp.fc2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""mlp.fc2""" , """output.dense""" )
if "q_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""q_bias""" , """query.bias""" )
if "k_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""k_bias""" , """key.bias""" )
if "v_bias" in name:
__SCREAMING_SNAKE_CASE = name.replace("""v_bias""" , """value.bias""" )
if "cpb_mlp" in name:
__SCREAMING_SNAKE_CASE = name.replace("""cpb_mlp""" , """continuous_position_bias_mlp""" )
if "patch_embed.proj" in name:
__SCREAMING_SNAKE_CASE = name.replace("""patch_embed.proj""" , """patch_embed.projection""" )
if name == "norm.weight":
__SCREAMING_SNAKE_CASE = """layernorm.weight"""
if name == "norm.bias":
__SCREAMING_SNAKE_CASE = """layernorm.bias"""
if "conv_first" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_first""" , """first_convolution""" )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_last""" , """final_convolution""" )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__SCREAMING_SNAKE_CASE = name.replace("""conv_before_upsample.0""" , """conv_before_upsample""" )
if "upsample.0" in name:
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0""" , """upsample.convolution_0""" )
if "upsample.2" in name:
__SCREAMING_SNAKE_CASE = name.replace("""upsample.2""" , """upsample.convolution_1""" )
__SCREAMING_SNAKE_CASE = """upsample.""" + name
elif config.upsampler == "pixelshuffledirect":
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0.weight""" , """upsample.conv.weight""" )
__SCREAMING_SNAKE_CASE = name.replace("""upsample.0.bias""" , """upsample.conv.bias""" )
else:
pass
else:
__SCREAMING_SNAKE_CASE = """swin2sr.""" + name
return name
def a__ ( a__ , a__ ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE = orig_state_dict.pop(a__ )
if "qkv" in key:
__SCREAMING_SNAKE_CASE = key.split(""".""" )
__SCREAMING_SNAKE_CASE = int(key_split[1] )
__SCREAMING_SNAKE_CASE = int(key_split[4] )
__SCREAMING_SNAKE_CASE = config.embed_dim
if "weight" in key:
__SCREAMING_SNAKE_CASE = val[:dim, :]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE = val[:dim]
__SCREAMING_SNAKE_CASE = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE = val[-dim:]
pass
else:
__SCREAMING_SNAKE_CASE = val
return orig_state_dict
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = get_config(a__ )
__SCREAMING_SNAKE_CASE = SwinaSRForImageSuperResolution(a__ )
model.eval()
__SCREAMING_SNAKE_CASE = torch.hub.load_state_dict_from_url(a__ , map_location="""cpu""" )
__SCREAMING_SNAKE_CASE = convert_state_dict(a__ , a__ )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model.load_state_dict(a__ , strict=a__ )
if len(a__ ) > 0:
raise ValueError("""Missing keys when converting: {}""".format(a__ ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(F'Unexpected key {key} in state_dict' )
# verify values
__SCREAMING_SNAKE_CASE = """https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(a__ , stream=a__ ).raw ).convert("""RGB""" )
__SCREAMING_SNAKE_CASE = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__SCREAMING_SNAKE_CASE = 1_26 if """Jpeg""" in checkpoint_url else 2_56
__SCREAMING_SNAKE_CASE = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__SCREAMING_SNAKE_CASE = transforms(a__ ).unsqueeze(0 )
if config.num_channels == 1:
__SCREAMING_SNAKE_CASE = pixel_values[:, 0, :, :].unsqueeze(1 )
__SCREAMING_SNAKE_CASE = model(a__ )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_087, -0.7_138, -0.6_721], [-0.8_340, -0.8_095, -0.7_298], [-0.9_149, -0.8_414, -0.7_940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_775, -0.8_105, -0.8_933], [-0.7_764, -0.8_356, -0.9_225], [-0.7_976, -0.8_686, -0.9_579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.8_035, -0.7_504, -0.7_491], [-0.8_538, -0.8_124, -0.7_782], [-0.8_804, -0.8_651, -0.8_493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 5_12, 5_12] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.7_669, -0.8_662, -0.8_767], [-0.8_810, -0.9_962, -0.9_820], [-0.9_340, -1.0_322, -1.1_149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__SCREAMING_SNAKE_CASE = torch.Size([1, 3, 10_24, 10_24] )
__SCREAMING_SNAKE_CASE = torch.tensor(
[[-0.5_238, -0.5_557, -0.6_321], [-0.6_016, -0.5_903, -0.6_391], [-0.6_244, -0.6_334, -0.6_889]] )
assert (
outputs.reconstruction.shape == expected_shape
), F'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , a__ , atol=1E-3 )
print("""Looks ok!""" )
__SCREAMING_SNAKE_CASE = {
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth""": (
"""swin2SR-classical-sr-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth""": (
"""swin2SR-classical-sr-x4-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth""": (
"""swin2SR-compressed-sr-x4-48"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth""": (
"""swin2SR-lightweight-x2-64"""
),
"""https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth""": (
"""swin2SR-realworld-sr-x4-64-bsrgan-psnr"""
),
}
__SCREAMING_SNAKE_CASE = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(F'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(a__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(a__ )
if push_to_hub:
model.push_to_hub(F'caidas/{model_name}' )
processor.push_to_hub(F'caidas/{model_name}' )
if __name__ == "__main__":
UpperCAmelCase : Optional[int] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--checkpoint_url',
default='https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth',
type=str,
help='URL of the original Swin2SR checkpoint you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument('--push_to_hub', action='store_true', help='Whether to push the converted model to the hub.')
UpperCAmelCase : List[str] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub)
| 331 |
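convert_state_dict in the Swin2SR sample above slices a fused qkv projection into separate query/key/value tensors with exactly the index arithmetic below. A minimal check of that slicing, assuming a fused weight of shape (3 * dim, dim):
import torch

dim = 4
qkv_weight = torch.randn(3 * dim, dim)  # stacked [query; key; value]
query = qkv_weight[:dim, :]
key = qkv_weight[dim : dim * 2, :]
value = qkv_weight[-dim:, :]
assert query.shape == key.shape == value.shape == (dim, dim)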
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
UpperCAmelCase : Tuple = {'configuration_reformer': ['REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ReformerConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[str] = ['ReformerTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : Tuple = ['ReformerTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase : List[Any] = [
'REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST',
'ReformerAttention',
'ReformerForMaskedLM',
'ReformerForQuestionAnswering',
'ReformerForSequenceClassification',
'ReformerLayer',
'ReformerModel',
'ReformerModelWithLMHead',
'ReformerPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
UpperCAmelCase : Any = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 331 | 1 |
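The try/except blocks in the Reformer init above gate exports on optional packages. A rough standard-library equivalent of that availability probe; the package and symbol names are just examples:
import importlib.util

def is_available(package_name: str) -> bool:
    # find_spec returns None when a top-level package cannot be imported
    return importlib.util.find_spec(package_name) is not None

exports = ["ReformerConfig"]
if is_available("sentencepiece"):
    exports.append("ReformerTokenizer")
print(exports)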
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
__lowerCAmelCase : str ={
'configuration_owlvit': [
'OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP',
'OwlViTConfig',
'OwlViTOnnxConfig',
'OwlViTTextConfig',
'OwlViTVisionConfig',
],
'processing_owlvit': ['OwlViTProcessor'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : Optional[int] =['OwlViTFeatureExtractor']
__lowerCAmelCase : str =['OwlViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCAmelCase : int =[
'OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'OwlViTModel',
'OwlViTPreTrainedModel',
'OwlViTTextModel',
'OwlViTVisionModel',
'OwlViTForObjectDetection',
]
if TYPE_CHECKING:
from .configuration_owlvit import (
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
OwlViTConfig,
OwlViTOnnxConfig,
OwlViTTextConfig,
OwlViTVisionConfig,
)
from .processing_owlvit import OwlViTProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_owlvit import OwlViTFeatureExtractor
from .image_processing_owlvit import OwlViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_owlvit import (
OWLVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
OwlViTForObjectDetection,
OwlViTModel,
OwlViTPreTrainedModel,
OwlViTTextModel,
OwlViTVisionModel,
)
else:
import sys
__lowerCAmelCase : Optional[int] =_LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 9 |
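The last line of the OwlViT init above swaps the module object in sys.modules for a lazy proxy. A tiny sketch of that registration trick with a plain module object (the module name is made up):
import sys
import types

proxy = types.ModuleType("demo_lazy")
proxy.GREETING = "hello"
sys.modules["demo_lazy"] = proxy  # the import machinery consults sys.modules first

import demo_lazy  # resolves to the proxy registered above

print(demo_lazy.GREETING)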
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import PoolFormerImageProcessor
class snake_case__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Optional[int], _snake_case : List[Any], _snake_case : str=7, _snake_case : Tuple=3, _snake_case : List[str]=3_0, _snake_case : Tuple=4_0_0, _snake_case : Any=True, _snake_case : List[Any]=None, _snake_case : int=0.9, _snake_case : Optional[Any]=None, _snake_case : str=True, _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], _snake_case : Union[str, Any]=[0.5, 0.5, 0.5], ) ->List[Any]:
snake_case__ : int = size if size is not None else {'shortest_edge': 3_0}
snake_case__ : Tuple = crop_size if crop_size is not None else {'height': 3_0, 'width': 3_0}
snake_case__ : Union[str, Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : int = num_channels
snake_case__ : Tuple = min_resolution
snake_case__ : Any = max_resolution
snake_case__ : List[Any] = do_resize_and_center_crop
snake_case__ : str = size
snake_case__ : str = crop_pct
snake_case__ : List[str] = crop_size
snake_case__ : Optional[int] = do_normalize
snake_case__ : Tuple = image_mean
snake_case__ : Tuple = image_std
def lowercase_ ( self : Optional[int] ) ->int:
return {
"size": self.size,
"do_resize_and_center_crop": self.do_resize_and_center_crop,
"crop_pct": self.crop_pct,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
@require_torch
@require_vision
class snake_case__ ( lowerCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
_SCREAMING_SNAKE_CASE = PoolFormerImageProcessor if is_vision_available() else None
def lowercase_ ( self : Union[str, Any] ) ->Dict:
snake_case__ : Union[str, Any] = PoolFormerImageProcessingTester(self )
@property
def lowercase_ ( self : int ) ->Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowercase_ ( self : Union[str, Any] ) ->Optional[int]:
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_snake_case, 'do_resize_and_center_crop' ) )
self.assertTrue(hasattr(_snake_case, 'size' ) )
self.assertTrue(hasattr(_snake_case, 'crop_pct' ) )
self.assertTrue(hasattr(_snake_case, 'do_normalize' ) )
self.assertTrue(hasattr(_snake_case, 'image_mean' ) )
self.assertTrue(hasattr(_snake_case, 'image_std' ) )
def lowercase_ ( self : List[str] ) ->List[str]:
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size, {'shortest_edge': 3_0} )
self.assertEqual(image_processor.crop_size, {'height': 3_0, 'width': 3_0} )
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict, size=4_2, crop_size=8_4 )
self.assertEqual(image_processor.size, {'shortest_edge': 4_2} )
self.assertEqual(image_processor.crop_size, {'height': 8_4, 'width': 8_4} )
def lowercase_ ( self : List[Any] ) ->List[Any]:
pass
def lowercase_ ( self : List[str] ) ->str:
# Initialize image_processing
snake_case__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, Image.Image )
# Test not batched input
snake_case__ : Optional[int] = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
snake_case__ : str = image_processing(_snake_case, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def lowercase_ ( self : int ) ->List[Any]:
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, numpify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, np.ndarray )
# Test not batched input
snake_case__ : Dict = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
snake_case__ : List[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
def lowercase_ ( self : List[str] ) ->List[str]:
# Initialize image_processing
snake_case__ : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester, equal_resolution=_snake_case, torchify=_snake_case )
for image in image_inputs:
self.assertIsInstance(_snake_case, torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0], return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
# Test batched
snake_case__ : Optional[Any] = image_processing(_snake_case, return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape, (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
), )
| 277 | 0 |
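Every shape assertion in the PoolFormer test above checks the same contract: a batch of HxWxC images comes back as an NCHW tensor with height and width equal to crop_size. A numpy-only sketch of that contract, using a simplified center crop rather than the real PoolFormerImageProcessor:
import numpy as np

batch_size, num_channels, crop = 7, 3, 30
images = [
    np.random.randint(0, 256, size=(40, 40, num_channels), dtype=np.uint8)
    for _ in range(batch_size)
]

def center_crop(img, size):
    top = (img.shape[0] - size) // 2
    left = (img.shape[1] - size) // 2
    return img[top : top + size, left : left + size]

pixel_values = np.stack(
    [center_crop(img, crop).transpose(2, 0, 1) for img in images]  # HWC -> CHW
)
assert pixel_values.shape == (batch_size, num_channels, crop, crop)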
'''simple docstring'''
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def A_( A : Dict):
UpperCamelCase = torch.exp(A)
UpperCamelCase = torch.sum(A , dim=1) # sum of exp(x_i)
UpperCamelCase = torch.sum(x * exp_x , dim=1) # sum of x_i * exp(x_i)
return torch.log(A) - B / A
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ )-> Optional[int]:
'''simple docstring'''
super().__init__()
UpperCamelCase = config.output_attentions
UpperCamelCase = config.output_hidden_states
UpperCamelCase = nn.ModuleList([BertLayer(A_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = nn.ModuleList([BertHighway(A_ ) for _ in range(config.num_hidden_layers )] )
UpperCamelCase = [-1 for _ in range(config.num_hidden_layers )]
def UpperCAmelCase_ ( self , A_ )-> Optional[Any]:
'''simple docstring'''
if (type(A_ ) is float) or (type(A_ ) is int):
for i in range(len(self.early_exit_entropy ) ):
UpperCamelCase = x
else:
UpperCamelCase = x
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def UpperCAmelCase_ ( self , A_ , A_=None , A_=None , A_=None , A_=None , )-> Any:
'''simple docstring'''
UpperCamelCase = ()
UpperCamelCase = ()
UpperCamelCase = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = layer_module(
A_ , A_ , head_mask[i] , A_ , A_ )
UpperCamelCase = layer_outputs[0]
if self.output_attentions:
UpperCamelCase = all_attentions + (layer_outputs[1],)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = current_outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = current_outputs + (all_attentions,)
UpperCamelCase = self.highway[i](A_ )
# logits, pooled_output
if not self.training:
UpperCamelCase = highway_exit[0]
UpperCamelCase = entropy(A_ )
UpperCamelCase = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
UpperCamelCase = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
UpperCamelCase = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(A_ , i + 1 )
else:
UpperCamelCase = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
UpperCamelCase = all_hidden_states + (hidden_states,)
UpperCamelCase = (hidden_states,)
if self.output_hidden_states:
UpperCamelCase = outputs + (all_hidden_states,)
if self.output_attentions:
UpperCamelCase = outputs + (all_attentions,)
UpperCamelCase = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
"""The Bert Model transformer with early exiting (DeeBERT). """ , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> Dict:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config
UpperCamelCase = BertEmbeddings(A_ )
UpperCamelCase = DeeBertEncoder(A_ )
UpperCamelCase = BertPooler(A_ )
self.init_weights()
def UpperCAmelCase_ ( self )-> Optional[int]:
'''simple docstring'''
self.encoder.init_highway_pooler(self.pooler )
def UpperCAmelCase_ ( self )-> int:
'''simple docstring'''
return self.embeddings.word_embeddings
def UpperCAmelCase_ ( self , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = value
def UpperCAmelCase_ ( self , A_ )-> Dict:
'''simple docstring'''
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(A_ )
@add_start_docstrings_to_model_forward(A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , )-> Optional[int]:
'''simple docstring'''
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
UpperCamelCase = input_ids.size()
elif inputs_embeds is not None:
UpperCamelCase = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
UpperCamelCase = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
UpperCamelCase = torch.ones(A_ , device=A_ )
if encoder_attention_mask is None:
UpperCamelCase = torch.ones(A_ , device=A_ )
if token_type_ids is None:
UpperCamelCase = torch.zeros(A_ , dtype=torch.long , device=A_ )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
UpperCamelCase = self.get_extended_attention_mask(A_ , A_ , A_ )
        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make it broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
UpperCamelCase = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
UpperCamelCase = encoder_attention_mask[:, None, None, :]
UpperCamelCase = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
UpperCamelCase = (1.0 - encoder_extended_attention_mask) * -10_000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
UpperCamelCase = self.get_head_mask(A_ , self.config.num_hidden_layers )
UpperCamelCase = self.embeddings(
input_ids=A_ , position_ids=A_ , token_type_ids=A_ , inputs_embeds=A_ )
UpperCamelCase = self.encoder(
A_ , attention_mask=A_ , head_mask=A_ , encoder_hidden_states=A_ , encoder_attention_mask=A_ , )
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
UpperCamelCase = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ , A_ )-> List[str]:
'''simple docstring'''
UpperCamelCase = message
UpperCamelCase = exit_layer # start from 1!
class SCREAMING_SNAKE_CASE__ ( nn.Module):
def __init__( self , A_ )-> Optional[int]:
'''simple docstring'''
super().__init__()
UpperCamelCase = BertPooler(A_ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , config.num_labels )
def UpperCAmelCase_ ( self , A_ )-> Union[str, Any]:
'''simple docstring'''
UpperCamelCase = encoder_outputs[0]
UpperCamelCase = self.pooler(A_ )
# "return" pooler_output
# BertModel
UpperCamelCase = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
UpperCamelCase = bmodel_output[1]
UpperCamelCase = self.dropout(A_ )
UpperCamelCase = self.classifier(A_ )
return logits, pooled_output
@add_start_docstrings(
"""Bert Model (with early exiting - DeeBERT) with a classifier on top,
also takes care of multi-layer training. """ , snake_case_ , )
class SCREAMING_SNAKE_CASE__ ( snake_case_):
def __init__( self , A_ )-> Any:
'''simple docstring'''
super().__init__(A_ )
UpperCamelCase = config.num_labels
UpperCamelCase = config.num_hidden_layers
UpperCamelCase = DeeBertModel(A_ )
UpperCamelCase = nn.Dropout(config.hidden_dropout_prob )
UpperCamelCase = nn.Linear(config.hidden_size , self.config.num_labels )
self.init_weights()
@add_start_docstrings_to_model_forward(A_ )
def UpperCAmelCase_ ( self , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=None , A_=-1 , A_=False , )-> Optional[int]:
'''simple docstring'''
UpperCamelCase = self.num_layers
try:
UpperCamelCase = self.bert(
A_ , attention_mask=A_ , token_type_ids=A_ , position_ids=A_ , head_mask=A_ , inputs_embeds=A_ , )
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
UpperCamelCase = outputs[1]
UpperCamelCase = self.dropout(A_ )
UpperCamelCase = self.classifier(A_ )
UpperCamelCase = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
UpperCamelCase = e.message
UpperCamelCase = e.exit_layer
UpperCamelCase = outputs[0]
if not self.training:
UpperCamelCase = entropy(A_ )
UpperCamelCase = []
UpperCamelCase = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
# work with highway exits
UpperCamelCase = []
for highway_exit in outputs[-1]:
UpperCamelCase = highway_exit[0]
if not self.training:
highway_logits_all.append(A_ )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
UpperCamelCase = MSELoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 ) , labels.view(-1 ) )
else:
UpperCamelCase = CrossEntropyLoss()
UpperCamelCase = loss_fct(highway_logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
highway_losses.append(A_ )
if train_highway:
UpperCamelCase = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
UpperCamelCase = (loss,) + outputs
if not self.training:
UpperCamelCase = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
UpperCamelCase = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
| 251 |
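The helper at the top of the DeeBERT sample computes the softmax entropy H = log(sum_i e^{x_i}) - (sum_i x_i e^{x_i}) / (sum_i e^{x_i}); the mangled copy above has collapsed its local names, so here is a standalone, numerically naive version of the same math:
import torch

def entropy(x: torch.Tensor) -> torch.Tensor:
    exp_x = torch.exp(x)
    sum_exp = torch.sum(exp_x, dim=1)        # sum of exp(x_i)
    sum_x_exp = torch.sum(x * exp_x, dim=1)  # sum of x_i * exp(x_i)
    return torch.log(sum_exp) - sum_x_exp / sum_exp

print(entropy(torch.tensor([[2.0, 0.5, -1.0]])))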
'''simple docstring'''
from ... import PretrainedConfig
lowerCAmelCase : List[str] = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class SCREAMING_SNAKE_CASE__ ( snake_case_):
lowerCAmelCase_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
lowerCAmelCase_ = """nezha"""
def __init__( self , A_=21128 , A_=768 , A_=12 , A_=12 , A_=3072 , A_="gelu" , A_=0.1 , A_=0.1 , A_=512 , A_=64 , A_=2 , A_=0.02 , A_=1e-12 , A_=0.1 , A_=0 , A_=2 , A_=3 , A_=True , **A_ , )-> List[str]:
'''simple docstring'''
super().__init__(pad_token_id=A_ , bos_token_id=A_ , eos_token_id=A_ , **A_ )
UpperCamelCase = vocab_size
UpperCamelCase = hidden_size
UpperCamelCase = num_hidden_layers
UpperCamelCase = num_attention_heads
UpperCamelCase = hidden_act
UpperCamelCase = intermediate_size
UpperCamelCase = hidden_dropout_prob
UpperCamelCase = attention_probs_dropout_prob
UpperCamelCase = max_position_embeddings
UpperCamelCase = max_relative_position
UpperCamelCase = type_vocab_size
UpperCamelCase = initializer_range
UpperCamelCase = layer_norm_eps
UpperCamelCase = classifier_dropout
UpperCamelCase = use_cache
| 251 | 1 |
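A config class like the Nezha one above is essentially keyword defaults plus JSON round-tripping inherited from PretrainedConfig. A dataclass sketch of that round-trip idea, deliberately much smaller than the transformers machinery:
import json
from dataclasses import dataclass, asdict

@dataclass
class TinyConfig:
    vocab_size: int = 21128
    hidden_size: int = 768
    num_hidden_layers: int = 12

cfg = TinyConfig(hidden_size=512)
# serialize to JSON and rebuild; overridden and default values both survive
restored = TinyConfig(**json.loads(json.dumps(asdict(cfg))))
assert restored == cfg
print(restored)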
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class UpperCAmelCase ( UpperCAmelCase__ ):
'''simple docstring'''
lowerCAmelCase_ = ['''image_processor''', '''tokenizer''']
lowerCAmelCase_ = '''ViTImageProcessor'''
lowerCAmelCase_ = ('''CLIPTokenizer''', '''CLIPTokenizerFast''')
def __init__( self : int , __lowercase : Optional[Any]=None , __lowercase : Dict=None , **__lowercase : Union[str, Any] ):
"""simple docstring"""
snake_case_ = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __lowercase , )
snake_case_ = kwargs.pop("feature_extractor" )
snake_case_ = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__lowercase , __lowercase )
def __call__( self : Any , __lowercase : Optional[int]=None , __lowercase : int=None , __lowercase : List[Any]=None , __lowercase : Tuple=None , **__lowercase : List[str] ):
"""simple docstring"""
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
snake_case_ = self.tokenizer(__lowercase , return_tensors=__lowercase , **__lowercase )
if visual_prompt is not None:
snake_case_ = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if images is not None:
snake_case_ = self.image_processor(__lowercase , return_tensors=__lowercase , **__lowercase )
if visual_prompt is not None and images is not None:
snake_case_ = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
snake_case_ = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
snake_case_ = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__lowercase ) , tensor_type=__lowercase )
def snake_case__ ( self : int , *__lowercase : Union[str, Any] , **__lowercase : Tuple ):
"""simple docstring"""
return self.tokenizer.batch_decode(*__lowercase , **__lowercase )
def snake_case__ ( self : Optional[int] , *__lowercase : Dict , **__lowercase : List[str] ):
"""simple docstring"""
return self.tokenizer.decode(*__lowercase , **__lowercase )
@property
def snake_case__ ( self : Optional[Any] ):
"""simple docstring"""
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __lowercase , )
return self.image_processor_class
@property
def snake_case__ ( self : Dict ):
"""simple docstring"""
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __lowercase , )
return self.image_processor
| 187 |
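The processor's __call__ above routes three optional inputs (text, visual prompt, images) into a single encoding dict. A toy sketch of just that dispatch logic, with string stand-ins for the real tokenizer and image processor:
def build_encoding(text=None, visual_prompt=None, images=None):
    if text is None and visual_prompt is None and images is None:
        raise ValueError("You have to specify either text, visual prompt or images.")
    if text is not None and visual_prompt is not None:
        raise ValueError("You have to specify exactly one type of prompt.")
    encoding = {}
    if text is not None:
        encoding["input_ids"] = f"tokenized({text})"        # stand-in for the tokenizer
    if visual_prompt is not None:
        encoding["conditional_pixel_values"] = f"processed({visual_prompt})"
    if images is not None:
        encoding["pixel_values"] = f"processed({images})"   # stand-in for the image processor
    return encoding

print(build_encoding(text="a photo of a cat", images="cat.png"))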
import argparse
import os.path as osp
import re
import torch
from safetensors.torch import load_file, save_file
# =================#
# UNet Conversion #
# =================#
lowercase__ : Optional[int] = [
# (stable-diffusion, HF Diffusers)
("time_embed.0.weight", "time_embedding.linear_1.weight"),
("time_embed.0.bias", "time_embedding.linear_1.bias"),
("time_embed.2.weight", "time_embedding.linear_2.weight"),
("time_embed.2.bias", "time_embedding.linear_2.bias"),
("input_blocks.0.0.weight", "conv_in.weight"),
("input_blocks.0.0.bias", "conv_in.bias"),
("out.0.weight", "conv_norm_out.weight"),
("out.0.bias", "conv_norm_out.bias"),
("out.2.weight", "conv_out.weight"),
("out.2.bias", "conv_out.bias"),
]
lowercase__ : List[Any] = [
# (stable-diffusion, HF Diffusers)
("in_layers.0", "norm1"),
("in_layers.2", "conv1"),
("out_layers.0", "norm2"),
("out_layers.3", "conv2"),
("emb_layers.1", "time_emb_proj"),
("skip_connection", "conv_shortcut"),
]
lowercase__ : Optional[Any] = []
# hardcoded number of downblocks and resnets/attentions...
# would need smarter logic for other networks.
for i in range(4):
# loop over downblocks/upblocks
for j in range(2):
# loop over resnets/attentions for downblocks
lowercase__ : str = f'''down_blocks.{i}.resnets.{j}.'''
lowercase__ : Union[str, Any] = f'''input_blocks.{3*i + j + 1}.0.'''
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
if i < 3:
# no attention layers in down_blocks.3
lowercase__ : Tuple = f'''down_blocks.{i}.attentions.{j}.'''
lowercase__ : Dict = f'''input_blocks.{3*i + j + 1}.1.'''
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
for j in range(3):
# loop over resnets/attentions for upblocks
lowercase__ : List[Any] = f'''up_blocks.{i}.resnets.{j}.'''
lowercase__ : int = f'''output_blocks.{3*i + j}.0.'''
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
if i > 0:
# no attention layers in up_blocks.0
lowercase__ : List[str] = f'''up_blocks.{i}.attentions.{j}.'''
lowercase__ : Tuple = f'''output_blocks.{3*i + j}.1.'''
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
if i < 3:
# no downsample in down_blocks.3
lowercase__ : List[str] = f'''down_blocks.{i}.downsamplers.0.conv.'''
lowercase__ : Any = f'''input_blocks.{3*(i+1)}.0.op.'''
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
# no upsample in up_blocks.3
lowercase__ : Optional[int] = f'''up_blocks.{i}.upsamplers.0.'''
lowercase__ : int = f'''output_blocks.{3*i + 2}.{1 if i == 0 else 2}.'''
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
lowercase__ : Union[str, Any] = "mid_block.attentions.0."
lowercase__ : List[Any] = "middle_block.1."
unet_conversion_map_layer.append((sd_mid_atn_prefix, hf_mid_atn_prefix))
for j in range(2):
lowercase__ : Tuple = f'''mid_block.resnets.{j}.'''
lowercase__ : List[str] = f'''middle_block.{2*j}.'''
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = {k: k for k in unet_state_dict.keys()}
for sd_name, hf_name in unet_conversion_map:
snake_case_ = sd_name
for k, v in mapping.items():
if "resnets" in k:
for sd_part, hf_part in unet_conversion_map_resnet:
snake_case_ = v.replace(_A , _A )
snake_case_ = v
for k, v in mapping.items():
for sd_part, hf_part in unet_conversion_map_layer:
snake_case_ = v.replace(_A , _A )
snake_case_ = v
snake_case_ = {v: unet_state_dict[k] for k, v in mapping.items()}
return new_state_dict
# ================#
# VAE Conversion #
# ================#
lowercase__ : Dict = [
# (stable-diffusion, HF Diffusers)
("nin_shortcut", "conv_shortcut"),
("norm_out", "conv_norm_out"),
("mid.attn_1.", "mid_block.attentions.0."),
]
for i in range(4):
# down_blocks have two resnets
for j in range(2):
lowercase__ : Any = f'''encoder.down_blocks.{i}.resnets.{j}.'''
lowercase__ : List[Any] = f'''encoder.down.{i}.block.{j}.'''
vae_conversion_map.append((sd_down_prefix, hf_down_prefix))
if i < 3:
lowercase__ : Optional[int] = f'''down_blocks.{i}.downsamplers.0.'''
lowercase__ : Tuple = f'''down.{i}.downsample.'''
vae_conversion_map.append((sd_downsample_prefix, hf_downsample_prefix))
lowercase__ : List[str] = f'''up_blocks.{i}.upsamplers.0.'''
lowercase__ : Optional[int] = f'''up.{3-i}.upsample.'''
vae_conversion_map.append((sd_upsample_prefix, hf_upsample_prefix))
# up_blocks have three resnets
# also, up blocks in hf are numbered in reverse from sd
for j in range(3):
lowercase__ : int = f'''decoder.up_blocks.{i}.resnets.{j}.'''
lowercase__ : Union[str, Any] = f'''decoder.up.{3-i}.block.{j}.'''
vae_conversion_map.append((sd_up_prefix, hf_up_prefix))
# this part accounts for mid blocks in both the encoder and the decoder
for i in range(2):
lowercase__ : Dict = f'''mid_block.resnets.{i}.'''
lowercase__ : int = f'''mid.block_{i+1}.'''
vae_conversion_map.append((sd_mid_res_prefix, hf_mid_res_prefix))
lowercase__ : Optional[Any] = [
# (stable-diffusion, HF Diffusers)
("norm.", "group_norm."),
("q.", "query."),
("k.", "key."),
("v.", "value."),
("proj_out.", "proj_attn."),
]
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return w.reshape(*w.shape , 1 , 1 )
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = {k: k for k in vae_state_dict.keys()}
for k, v in mapping.items():
for sd_part, hf_part in vae_conversion_map:
snake_case_ = v.replace(_A , _A )
snake_case_ = v
for k, v in mapping.items():
if "attentions" in k:
for sd_part, hf_part in vae_conversion_map_attn:
snake_case_ = v.replace(_A , _A )
snake_case_ = v
snake_case_ = {v: vae_state_dict[k] for k, v in mapping.items()}
snake_case_ = ["q", "k", "v", "proj_out"]
for k, v in new_state_dict.items():
for weight_name in weights_to_convert:
if f"mid.attn_1.{weight_name}.weight" in k:
print(f"Reshaping {k} for SD format" )
snake_case_ = reshape_weight_for_sd(_A )
return new_state_dict
# =========================#
# Text Encoder Conversion #
# =========================#
lowercase__ : int = [
# (stable-diffusion, HF Diffusers)
("resblocks.", "text_model.encoder.layers."),
("ln_1", "layer_norm1"),
("ln_2", "layer_norm2"),
(".c_fc.", ".fc1."),
(".c_proj.", ".fc2."),
(".attn", ".self_attn"),
("ln_final.", "transformer.text_model.final_layer_norm."),
("token_embedding.weight", "transformer.text_model.embeddings.token_embedding.weight"),
("positional_embedding", "transformer.text_model.embeddings.position_embedding.weight"),
]
lowercase__ : Dict = {re.escape(x[1]): x[0] for x in textenc_conversion_lst}
lowercase__ : Tuple = re.compile("|".join(protected.keys()))
# Ordering is from https://github.com/pytorch/pytorch/blob/master/test/cpp/api/modules.cpp
lowercase__ : Dict = {"q": 0, "k": 1, "v": 2}
def lowerCamelCase__ ( _A ):
'''simple docstring'''
snake_case_ = {}
snake_case_ = {}
snake_case_ = {}
for k, v in text_enc_dict.items():
if (
k.endswith(".self_attn.q_proj.weight" )
or k.endswith(".self_attn.k_proj.weight" )
or k.endswith(".self_attn.v_proj.weight" )
):
snake_case_ = k[: -len(".q_proj.weight" )]
snake_case_ = k[-len("q_proj.weight" )]
if k_pre not in capture_qkv_weight:
snake_case_ = [None, None, None]
snake_case_ = v
continue
if (
k.endswith(".self_attn.q_proj.bias" )
or k.endswith(".self_attn.k_proj.bias" )
or k.endswith(".self_attn.v_proj.bias" )
):
snake_case_ = k[: -len(".q_proj.bias" )]
snake_case_ = k[-len("q_proj.bias" )]
if k_pre not in capture_qkv_bias:
snake_case_ = [None, None, None]
snake_case_ = v
continue
snake_case_ = textenc_pattern.sub(lambda _A : protected[re.escape(m.group(0 ) )] , _A )
snake_case_ = v
for k_pre, tensors in capture_qkv_weight.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
snake_case_ = textenc_pattern.sub(lambda _A : protected[re.escape(m.group(0 ) )] , _A )
snake_case_ = torch.cat(_A )
for k_pre, tensors in capture_qkv_bias.items():
if None in tensors:
raise Exception("CORRUPTED MODEL: one of the q-k-v values for the text encoder was missing" )
snake_case_ = textenc_pattern.sub(lambda _A : protected[re.escape(m.group(0 ) )] , _A )
snake_case_ = torch.cat(_A )
return new_state_dict
def lowerCamelCase__ ( _A ):
'''simple docstring'''
return text_enc_dict
if __name__ == "__main__":
lowercase__ : int = argparse.ArgumentParser()
parser.add_argument("--model_path", default=None, type=str, required=True, help="Path to the model to convert.")
parser.add_argument("--checkpoint_path", default=None, type=str, required=True, help="Path to the output model.")
parser.add_argument("--half", action="store_true", help="Save weights in half precision.")
parser.add_argument(
"--use_safetensors", action="store_true", help="Save weights use safetensors, default is ckpt."
)
lowercase__ : Dict = parser.parse_args()
assert args.model_path is not None, "Must provide a model path!"
assert args.checkpoint_path is not None, "Must provide a checkpoint path!"
# Path for safetensors
lowercase__ : Tuple = osp.join(args.model_path, "unet", "diffusion_pytorch_model.safetensors")
lowercase__ : int = osp.join(args.model_path, "vae", "diffusion_pytorch_model.safetensors")
lowercase__ : Any = osp.join(args.model_path, "text_encoder", "model.safetensors")
    # Load each model from safetensors if the file exists, otherwise fall back to the PyTorch .bin checkpoint
if osp.exists(unet_path):
lowercase__ : str = load_file(unet_path, device="cpu")
else:
lowercase__ : Optional[Any] = osp.join(args.model_path, "unet", "diffusion_pytorch_model.bin")
lowercase__ : Any = torch.load(unet_path, map_location="cpu")
if osp.exists(vae_path):
lowercase__ : Tuple = load_file(vae_path, device="cpu")
else:
lowercase__ : Any = osp.join(args.model_path, "vae", "diffusion_pytorch_model.bin")
lowercase__ : Dict = torch.load(vae_path, map_location="cpu")
if osp.exists(text_enc_path):
lowercase__ : Union[str, Any] = load_file(text_enc_path, device="cpu")
else:
lowercase__ : Union[str, Any] = osp.join(args.model_path, "text_encoder", "pytorch_model.bin")
lowercase__ : Optional[int] = torch.load(text_enc_path, map_location="cpu")
# Convert the UNet model
lowercase__ : Dict = convert_unet_state_dict(unet_state_dict)
lowercase__ : Any = {"model.diffusion_model." + k: v for k, v in unet_state_dict.items()}
# Convert the VAE model
lowercase__ : Dict = convert_vae_state_dict(vae_state_dict)
lowercase__ : Union[str, Any] = {"first_stage_model." + k: v for k, v in vae_state_dict.items()}
# Easiest way to identify v2.0 model seems to be that the text encoder (OpenCLIP) is deeper
lowercase__ : Optional[Any] = "text_model.encoder.layers.22.layer_norm2.bias" in text_enc_dict
if is_vaa_model:
# Need to add the tag 'transformer' in advance so we can knock it out from the final layer-norm
lowercase__ : Any = {"transformer." + k: v for k, v in text_enc_dict.items()}
lowercase__ : List[Any] = convert_text_enc_state_dict_vaa(text_enc_dict)
lowercase__ : List[str] = {"cond_stage_model.model." + k: v for k, v in text_enc_dict.items()}
else:
lowercase__ : Tuple = convert_text_enc_state_dict(text_enc_dict)
lowercase__ : Any = {"cond_stage_model.transformer." + k: v for k, v in text_enc_dict.items()}
# Put together new checkpoint
lowercase__ : Tuple = {**unet_state_dict, **vae_state_dict, **text_enc_dict}
if args.half:
lowercase__ : Any = {k: v.half() for k, v in state_dict.items()}
if args.use_safetensors:
save_file(state_dict, args.checkpoint_path)
else:
lowercase__ : Union[str, Any] = {"state_dict": state_dict}
torch.save(state_dict, args.checkpoint_path)
| 187 | 1 |
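reshape_weight_for_sd in the VAE section above turns a (out, in) linear weight into a (out, in, 1, 1) tensor so it can load as a 1x1 convolution in the original Stable Diffusion layout. A quick check of that shape trick:
import torch

w = torch.randn(8, 4)
w_conv = w.reshape(*w.shape, 1, 1)
assert w_conv.shape == (8, 4, 1, 1)
# same values, just viewed with two trailing singleton dimensions
assert torch.equal(w_conv[..., 0, 0], w)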
from __future__ import annotations
def __UpperCamelCase ( lowerCAmelCase__ : int ):
__a : List[str] = [True] * limit
__a : Union[str, Any] = False
__a : Union[str, Any] = False
__a : Union[str, Any] = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
__a : List[str] = i * 2
while index < limit:
__a : str = False
__a : Optional[int] = index + i
__a : Dict = [2]
for i in range(3 , lowerCAmelCase__ , 2 ):
if is_prime[i]:
primes.append(lowerCAmelCase__ )
return primes
def __UpperCamelCase ( lowerCAmelCase__ : int = 1_0_0_0_0_0_0 ):
__a : Optional[Any] = prime_sieve(lowerCAmelCase__ )
__a : str = 0
__a : Optional[int] = 0
for i in range(len(lowerCAmelCase__ ) ):
for j in range(i + length , len(lowerCAmelCase__ ) ):
__a : Any = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
__a : Tuple = j - i
__a : Tuple = sol
return largest
if __name__ == "__main__":
print(F"""{solution() = }""")
| 360 |
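Since the function names in the Euler sample above are mangled, here is a self-contained equivalent of its prime_sieve (odd-only Eratosthenes with 2 seeded by hand), slightly streamlined but producing the same list:
def prime_sieve(limit: int) -> list:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(3, int(limit**0.5) + 1, 2):
        if is_prime[i]:
            # start at i*i and step 2*i so only odd multiples are visited
            for j in range(i * i, limit, 2 * i):
                is_prime[j] = False
    return [2] + [i for i in range(3, limit, 2) if is_prime[i]]

print(prime_sieve(30))  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]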
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class UpperCamelCase__ :
def __init__(self : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any]=1_3 , snake_case_ : str=3_2 , snake_case_ : Any=2 , snake_case_ : Union[str, Any]=3 , snake_case_ : int=1_6 , snake_case_ : Optional[int]=[3_2, 6_4, 1_2_8] , snake_case_ : str=[1, 2, 1] , snake_case_ : str=[2, 2, 4] , snake_case_ : List[str]=2 , snake_case_ : List[str]=2.0 , snake_case_ : List[Any]=True , snake_case_ : Tuple=0.0 , snake_case_ : Optional[Any]=0.0 , snake_case_ : int=0.1 , snake_case_ : Optional[int]="gelu" , snake_case_ : List[str]=False , snake_case_ : Optional[int]=True , snake_case_ : Optional[int]=0.02 , snake_case_ : List[str]=1E-5 , snake_case_ : List[Any]=True , snake_case_ : int=None , snake_case_ : List[Any]=True , snake_case_ : Optional[Any]=1_0 , snake_case_ : Union[str, Any]=8 , snake_case_ : Optional[Any]=["stage1", "stage2"] , snake_case_ : List[Any]=[1, 2] , ):
__a : Tuple = parent
__a : str = batch_size
__a : Any = image_size
__a : List[Any] = patch_size
__a : List[Any] = num_channels
__a : List[str] = embed_dim
__a : str = hidden_sizes
__a : Any = depths
__a : List[str] = num_heads
__a : Any = window_size
__a : List[str] = mlp_ratio
__a : Optional[int] = qkv_bias
__a : Any = hidden_dropout_prob
__a : List[str] = attention_probs_dropout_prob
__a : str = drop_path_rate
__a : Optional[Any] = hidden_act
__a : Optional[int] = use_absolute_embeddings
__a : List[str] = patch_norm
__a : int = layer_norm_eps
__a : Optional[Any] = initializer_range
__a : List[str] = is_training
__a : Dict = scope
__a : Optional[Any] = use_labels
__a : Union[str, Any] = type_sequence_label_size
__a : Optional[int] = encoder_stride
__a : str = out_features
__a : Optional[int] = out_indices
def lowerCAmelCase (self : Dict ):
__a : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__a : Dict = None
if self.use_labels:
__a : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__a : List[str] = self.get_config()
return config, pixel_values, labels
def lowerCAmelCase (self : Optional[Any] ):
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def lowerCAmelCase (self : Dict , snake_case_ : int , snake_case_ : Tuple , snake_case_ : str ):
__a : int = FocalNetModel(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Dict = model(snake_case_ )
__a : Union[str, Any] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__a : Dict = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def lowerCAmelCase (self : Tuple , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
__a : List[str] = FocalNetBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : List[str] = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
__a : Union[str, Any] = None
__a : Tuple = FocalNetBackbone(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(snake_case_ )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def lowerCAmelCase (self : Optional[Any] , snake_case_ : Optional[int] , snake_case_ : Dict , snake_case_ : Optional[Any] ):
__a : List[str] = FocalNetForMaskedImageModeling(config=snake_case_ )
model.to(snake_case_ )
model.eval()
__a : int = model(snake_case_ )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
__a : str = 1
__a : Optional[Any] = FocalNetForMaskedImageModeling(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Any = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : Union[str, Any] = model(snake_case_ )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def lowerCAmelCase (self : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : int ):
__a : Dict = self.type_sequence_label_size
__a : Optional[Any] = FocalNetForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : Union[str, Any] = model(snake_case_ , labels=snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__a : Optional[int] = 1
__a : str = FocalNetForImageClassification(snake_case_ )
model.to(snake_case_ )
model.eval()
__a : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__a : List[Any] = model(snake_case_ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def lowerCAmelCase (self : List[Any] ):
__a : Any = self.prepare_config_and_inputs()
__a , __a , __a : Any = config_and_inputs
__a : Tuple = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class UpperCamelCase__ ( __lowercase ,__lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Optional[Any] = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
_SCREAMING_SNAKE_CASE : Optional[Any] = (
{"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
if is_torch_available()
else {}
)
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : str = False
_SCREAMING_SNAKE_CASE : Optional[Any] = False
_SCREAMING_SNAKE_CASE : Any = False
_SCREAMING_SNAKE_CASE : Dict = False
def lowerCAmelCase (self : List[Any] ):
__a : Union[str, Any] = FocalNetModelTester(self )
__a : Dict = ConfigTester(self , config_class=snake_case_ , embed_dim=3_7 , has_text_modality=snake_case_ )
def lowerCAmelCase (self : Any ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCAmelCase (self : Dict ):
return
def lowerCAmelCase (self : Dict ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case_ )
def lowerCAmelCase (self : Tuple ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*snake_case_ )
def lowerCAmelCase (self : Any ):
__a : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*snake_case_ )
def lowerCAmelCase (self : Optional[int] ):
__a : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case_ )
@unittest.skip(reason='''FocalNet does not use inputs_embeds''' )
def lowerCAmelCase (self : Optional[Any] ):
pass
@unittest.skip(reason='''FocalNet does not use feedforward chunking''' )
def lowerCAmelCase (self : str ):
pass
def lowerCAmelCase (self : Tuple ):
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a : int = model_class(snake_case_ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__a : List[Any] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(snake_case_ , nn.Linear ) )
def lowerCAmelCase (self : Optional[int] ):
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
__a : str = model_class(snake_case_ )
__a : int = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__a : Dict = [*signature.parameters.keys()]
__a : str = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , snake_case_ )
def lowerCAmelCase (self : Tuple , snake_case_ : Tuple , snake_case_ : Dict , snake_case_ : Any , snake_case_ : Optional[Any] ):
__a : Any = model_class(snake_case_ )
model.to(snake_case_ )
model.eval()
with torch.no_grad():
__a : Dict = model(**self._prepare_for_class(snake_case_ , snake_case_ ) )
__a : Union[str, Any] = outputs.hidden_states
__a : Dict = getattr(
self.model_tester , '''expected_num_hidden_layers''' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(snake_case_ ) , snake_case_ )
# FocalNet has a different seq_length
__a : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a : Dict = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
__a : Optional[Any] = outputs.reshaped_hidden_states
self.assertEqual(len(snake_case_ ) , snake_case_ )
__a , __a , __a , __a : List[Any] = reshaped_hidden_states[0].shape
__a : List[str] = (
reshaped_hidden_states[0].view(snake_case_ , snake_case_ , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def lowerCAmelCase (self : Optional[int] ):
__a , __a : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__a : Dict = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
__a : Any = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : Optional[int] = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a , __a : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__a : List[str] = 3
__a : Optional[int] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__a : List[str] = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__a : List[Any] = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__a : Union[str, Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
__a : int = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__a : List[str] = True
self.check_hidden_states_output(snake_case_ , snake_case_ , snake_case_ , (padded_height, padded_width) )
@slow
def lowerCAmelCase (self : str ):
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__a : Dict = FocalNetModel.from_pretrained(snake_case_ )
self.assertIsNotNone(snake_case_ )
def lowerCAmelCase (self : List[Any] ):
__a , __a : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__a : Optional[int] = _config_zero_init(snake_case_ )
for model_class in self.all_model_classes:
__a : str = model_class(config=snake_case_ )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f"Parameter {name} of model {model_class} seems not properly initialized" , )
@require_vision
@require_torch
class UpperCamelCase__ ( unittest.TestCase ):
@cached_property
def lowerCAmelCase (self : str ):
# TODO update organization
return AutoImageProcessor.from_pretrained('''microsoft/focalnet-tiny''' ) if is_vision_available() else None
@slow
def lowerCAmelCase (self : str ):
__a : int = FocalNetForImageClassification.from_pretrained('''microsoft/focalnet-tiny''' ).to(snake_case_ )
__a : Optional[Any] = self.default_image_processor
__a : Dict = Image.open('''./tests/fixtures/tests_samples/COCO/000000039769.png''' )
__a : Optional[Any] = image_processor(images=snake_case_ , return_tensors='''pt''' ).to(snake_case_ )
# forward pass
with torch.no_grad():
__a : Any = model(**snake_case_ )
# verify the logits
__a : List[Any] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , snake_case_ )
__a : Union[str, Any] = torch.tensor([0.2166, -0.4368, 0.2191] ).to(snake_case_ )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case_ , atol=1E-4 ) )
self.assertEqual(outputs.logits.argmax(dim=-1 ).item() , 2_8_1 )
@require_torch
class UpperCamelCase__ ( __lowercase ,unittest.TestCase ):
_SCREAMING_SNAKE_CASE : Tuple = (FocalNetBackbone,) if is_torch_available() else ()
_SCREAMING_SNAKE_CASE : int = FocalNetConfig
_SCREAMING_SNAKE_CASE : Any = False
def lowerCAmelCase (self : Tuple ):
__a : Union[str, Any] = FocalNetModelTester(self )
| 90 | 0 |
'''simple docstring'''
import pytest
from datasets.parallel import ParallelBackendConfig, parallel_backend
from datasets.utils.py_utils import map_nested
from .utils import require_dill_gt_0_3_2, require_joblibspark, require_not_windows
def SCREAMING_SNAKE_CASE__ ( __A ) -> Dict: # picklable for multiprocessing
return i + 1
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
def SCREAMING_SNAKE_CASE__ ( ) -> Optional[int]:
with parallel_backend('spark' ):
assert ParallelBackendConfig.backend_name == "spark"
_snake_case = [1, 2, 3]
with pytest.raises(__A ):
with parallel_backend('unsupported backend' ):
map_nested(__A , __A , num_proc=2 )
with pytest.raises(__A ):
with parallel_backend('unsupported backend' ):
map_nested(__A , __A , num_proc=-1 )
@require_dill_gt_0_3_2
@require_joblibspark
@require_not_windows
@pytest.mark.parametrize('num_proc' , [2, -1] )
def SCREAMING_SNAKE_CASE__ ( __A ) -> List[str]:
_snake_case = [1, 2]
_snake_case = {'a': 1, 'b': 2}
_snake_case = {'a': [1, 2], 'b': [3, 4]}
_snake_case = {'a': {'1': 1}, 'b': 2}
_snake_case = {'a': 1, 'b': 2, 'c': 3, 'd': 4}
_snake_case = [2, 3]
_snake_case = {'a': 2, 'b': 3}
_snake_case = {'a': [2, 3], 'b': [4, 5]}
_snake_case = {'a': {'1': 2}, 'b': 3}
_snake_case = {'a': 2, 'b': 3, 'c': 4, 'd': 5}
with parallel_backend('spark' ):
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
assert map_nested(__A , __A , num_proc=__A ) == expected_map_nested_sa
| 42 |
from __future__ import annotations
def __lowerCamelCase ( UpperCAmelCase_ : str , UpperCAmelCase_ : list[str] | None = None , UpperCAmelCase_ : dict[str, float] | None = None , UpperCAmelCase_ : bool = False , ):
"""simple docstring"""
a :str = cipher_alphabet or [chr(UpperCAmelCase_ ) for i in range(97 , 123 )]
# If the argument is None or the user provided an empty dictionary
if not frequencies_dict:
# Frequencies of letters in the english language (how much they show up)
a :List[Any] = {
'''a''': 0.08497,
'''b''': 0.01492,
'''c''': 0.02202,
'''d''': 0.04253,
'''e''': 0.11162,
'''f''': 0.02228,
'''g''': 0.02015,
'''h''': 0.06094,
'''i''': 0.07546,
'''j''': 0.00153,
'''k''': 0.01292,
'''l''': 0.04025,
'''m''': 0.02406,
'''n''': 0.06749,
'''o''': 0.07507,
'''p''': 0.01929,
'''q''': 0.00095,
'''r''': 0.07587,
'''s''': 0.06327,
'''t''': 0.09356,
'''u''': 0.02758,
'''v''': 0.00978,
'''w''': 0.02560,
'''x''': 0.00150,
'''y''': 0.01994,
'''z''': 0.00077,
}
else:
# Custom frequencies dictionary
a :Dict = frequencies_dict
if not case_sensitive:
a :Union[str, Any] = ciphertext.lower()
# Chi squared statistic values
a :dict[int, tuple[float, str]] = {}
# cycle through all of the shifts
for shift in range(len(UpperCAmelCase_ ) ):
a :int = ''''''
# decrypt the message with the shift
for letter in ciphertext:
try:
# Try to index the letter in the alphabet
a :Dict = (alphabet_letters.index(letter.lower() ) - shift) % len(
UpperCAmelCase_ )
decrypted_with_shift += (
alphabet_letters[new_key].upper()
if case_sensitive and letter.isupper()
else alphabet_letters[new_key]
)
except ValueError:
# Append the character if it isn't in the alphabet
decrypted_with_shift += letter
a :List[Any] = 0.0
# Loop through each letter in the decoded message with the shift
for letter in decrypted_with_shift:
if case_sensitive:
a :Optional[int] = letter.lower()
if letter in frequencies:
# Get the number of times the letter occurs in the message
a :List[Any] = decrypted_with_shift.lower().count(UpperCAmelCase_ )
# Get the expected number of times the letter should appear based
# on letter frequencies
a :Dict = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a :Any = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
else:
if letter.lower() in frequencies:
# Get the number of times the letter occurs in the message
a :int = decrypted_with_shift.count(UpperCAmelCase_ )
# Get the expected number of times the letter should appear based
# on letter frequencies
a :Tuple = frequencies[letter] * occurrences
# Complete the chi squared statistic formula
a :Optional[Any] = ((occurrences - expected) ** 2) / expected
# Add the margin of error to the total chi squared statistic
chi_squared_statistic += chi_letter_value
# Add the data to the chi_squared_statistic_values dictionary
a :Optional[Any] = (
chi_squared_statistic,
decrypted_with_shift,
)
# Get the most likely cipher by finding the cipher with the smallest chi squared
# statistic
def chi_squared_statistic_values_sorting_key(key : int ) -> tuple[float, str]:
return chi_squared_statistic_values[key]
most_likely_cipher :int = min(
chi_squared_statistic_values , key=chi_squared_statistic_values_sorting_key , )
# Get all the data from the most likely cipher (key, decoded message)
(
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
) = chi_squared_statistic_values[most_likely_cipher]
# Return the data on the most likely shift
return (
most_likely_cipher,
most_likely_cipher_chi_squared_value,
decoded_most_likely_cipher,
)
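# Hedged usage sketch (comment-only; the name `decrypt_caesar_with_chi_squared`
# and the sample ciphertext are illustrative stand-ins for the obfuscated
# identifiers above):
#
#   shift, chi_sq, plaintext = decrypt_caesar_with_chi_squared("uryyb jbeyq")
#
# For each candidate shift the routine totals (occurrences - expected) ** 2 / expected
# over the decrypted letters and keeps the shift with the smallest total, which for
# English-like text is usually the true key (13 for this ROT13 sample).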
| 94 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class __a ( A__ ):
def __init__( self : Optional[Any] , SCREAMING_SNAKE_CASE : str , SCREAMING_SNAKE_CASE : Optional[Any] ):
'''simple docstring'''
UpperCamelCase__ : List[Any] = params
UpperCamelCase__ : Tuple = np.array(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.array([len(SCREAMING_SNAKE_CASE ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : List[str] , SCREAMING_SNAKE_CASE : Union[str, Any] ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[Any] ):
'''simple docstring'''
return len(self.lengths )
def __lowercase ( self : Union[str, Any] ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def __lowercase ( self : Any ):
'''simple docstring'''
UpperCamelCase__ : int = self.params.max_model_input_size
UpperCamelCase__ : str = self.lengths > max_len
logger.info(F'Splitting {sum(SCREAMING_SNAKE_CASE )} too long sequences.' )
def divide_chunks(SCREAMING_SNAKE_CASE : Union[str, Any] , SCREAMING_SNAKE_CASE : Optional[int] ):
return [l[i : i + n] for i in range(0 , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )]
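# e.g. (illustrative): divide_chunks([1, 2, 3, 4, 5], 2) -> [[1, 2], [3, 4], [5]];
# each chunk is re-wrapped below with the cls/sep (or bos/eos) special tokens.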
UpperCamelCase__ : Optional[int] = []
UpperCamelCase__ : Dict = []
if self.params.mlm:
cls_id , sep_id = self.params.special_tok_ids["cls_token"], self.params.special_tok_ids["sep_token"]
else:
cls_id , sep_id = self.params.special_tok_ids["bos_token"], self.params.special_tok_ids["eos_token"]
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
UpperCamelCase__ : Tuple = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
UpperCamelCase__ : Optional[int] = np.insert(SCREAMING_SNAKE_CASE , 0 , SCREAMING_SNAKE_CASE )
if sub_s[-1] != sep_id:
UpperCamelCase__ : Optional[Any] = np.insert(SCREAMING_SNAKE_CASE , len(SCREAMING_SNAKE_CASE ) , SCREAMING_SNAKE_CASE )
assert len(SCREAMING_SNAKE_CASE ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(SCREAMING_SNAKE_CASE )
new_tok_ids.extend(SCREAMING_SNAKE_CASE )
new_lengths.extend([len(SCREAMING_SNAKE_CASE ) for l in sub_seqs] )
UpperCamelCase__ : Union[str, Any] = np.array(SCREAMING_SNAKE_CASE )
UpperCamelCase__ : int = np.array(SCREAMING_SNAKE_CASE )
def __lowercase ( self : Dict ):
'''simple docstring'''
UpperCamelCase__ : str = len(self )
UpperCamelCase__ : Optional[int] = self.lengths > 11
UpperCamelCase__ : List[str] = self.token_ids[indices]
UpperCamelCase__ : int = self.lengths[indices]
UpperCamelCase__ : Any = len(self )
logger.info(F'Remove {init_size - new_size} too short (<=11 tokens) sequences.' )
def __lowercase ( self : Optional[Any] ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
UpperCamelCase__ : List[Any] = self.params.special_tok_ids["unk_token"]
UpperCamelCase__ : Dict = len(self )
UpperCamelCase__ : Union[str, Any] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
UpperCamelCase__ : Dict = (unk_occs / self.lengths) < 0.5
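# e.g. (illustrative): a 20-token sequence containing 12 unknown tokens has a
# ratio of 0.6 and is dropped; one with 8 unknowns (ratio 0.4) passes the < 0.5
# filter and is kept.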
UpperCamelCase__ : Union[str, Any] = self.token_ids[indices]
UpperCamelCase__ : Optional[Any] = self.lengths[indices]
UpperCamelCase__ : Any = len(self )
logger.info(F'Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).' )
def __lowercase ( self : Dict ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(F'{len(self )} sequences' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def __lowercase ( self : str , SCREAMING_SNAKE_CASE : List[str] ):
'''simple docstring'''
UpperCamelCase__ : Tuple = [t[0] for t in batch]
UpperCamelCase__ : Any = [t[1] for t in batch]
assert len(SCREAMING_SNAKE_CASE ) == len(SCREAMING_SNAKE_CASE )
# Max for paddings
UpperCamelCase__ : Union[str, Any] = max(SCREAMING_SNAKE_CASE )
# Pad token ids
if self.params.mlm:
UpperCamelCase__ : List[Any] = self.params.special_tok_ids["pad_token"]
else:
UpperCamelCase__ : Union[str, Any] = self.params.special_tok_ids["unk_token"]
UpperCamelCase__ : Tuple = [list(t.astype(SCREAMING_SNAKE_CASE ) ) + [pad_idx] * (max_seq_len_ - len(SCREAMING_SNAKE_CASE )) for t in token_ids]
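# e.g. (illustrative): with batch lengths [3, 5], max_seq_len_ is 5, so a 3-token
# sequence [7, 8, 9] is extended with pad_idx (say 0) to [7, 8, 9, 0, 0].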
assert len(tk_ ) == len(SCREAMING_SNAKE_CASE )
assert all(len(SCREAMING_SNAKE_CASE ) == max_seq_len_ for t in tk_ )
UpperCamelCase__ : int = torch.tensor(tk_ ) # (bs, max_seq_len_)
UpperCamelCase__ : List[Any] = torch.tensor(SCREAMING_SNAKE_CASE ) # (bs)
return tk_t, lg_t | 359 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( __lowerCAmelCase , __lowerCAmelCase ) -> bool:
if len(__lowerCAmelCase ) == 0:
return False
UpperCamelCase__ : Any = len(__lowerCAmelCase ) // 2
if a_list[midpoint] == item:
return True
if item < a_list[midpoint]:
return binary_search(a_list[:midpoint] , __lowerCAmelCase )
else:
return binary_search(a_list[midpoint + 1 :] , __lowerCAmelCase )
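# The recursive version above copies a list slice at every level, which costs O(n)
# extra work per call. A hedged equivalent sketch (new code, not in the original)
# that walks indices instead; it assumes `a_list` is sorted ascending:
def binary_search_iterative(a_list: list[int], item: int) -> bool:
    low, high = 0, len(a_list) - 1
    while low <= high:
        midpoint = (low + high) // 2
        if a_list[midpoint] == item:
            return True
        if item < a_list[midpoint]:
            high = midpoint - 1  # discard the right half
        else:
            low = midpoint + 1  # discard the left half
    return False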
if __name__ == "__main__":
lowerCamelCase : Any =input('''Enter numbers separated by comma:\n''').strip()
lowerCamelCase : Dict =[int(item.strip()) for item in user_input.split(''',''')]
lowerCamelCase : List[str] =int(input('''Enter the number to be found in the list:\n''').strip())
lowerCamelCase : Union[str, Any] ='''''' if binary_search(sequence, target) else '''not '''
print(F"""{target} was {not_str}found in {sequence}""") | 196 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from torch import nn
from torch.nn import CrossEntropyLoss
from ... import AutoBackbone
from ...modeling_outputs import SemanticSegmenterOutput
from ...modeling_utils import PreTrainedModel
from ...utils import add_start_docstrings, add_start_docstrings_to_model_forward, replace_return_docstrings
from ...utils.backbone_utils import BackboneMixin
from .configuration_upernet import UperNetConfig
lowercase_ = [
"openmmlab/upernet-convnext-tiny",
# See all UperNet models at https://huggingface.co/models?filter=upernet
]
# General docstring
lowercase_ = "UperNetConfig"
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a = 0 , _a = False , _a = 1 , ):
super().__init__()
__a = nn.Conv2d(
in_channels=_a , out_channels=_a , kernel_size=_a , padding=_a , bias=_a , dilation=_a , )
__a = nn.BatchNorm2d(_a )
__a = nn.ReLU()
def __UpperCAmelCase ( self , _a ):
__a = self.conv(_a )
__a = self.batch_norm(_a )
__a = self.activation(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a ):
super().__init__()
__a = [
nn.AdaptiveAvgPool2d(_a ),
UperNetConvModule(_a , _a , kernel_size=1 ),
]
for i, layer in enumerate(self.layers ):
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = input
for layer in self.layers:
__a = layer(_a )
return hidden_state
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a , _a , _a ):
super().__init__()
__a = pool_scales
__a = align_corners
__a = in_channels
__a = channels
__a = []
for i, pool_scale in enumerate(_a ):
__a = UperNetPyramidPoolingBlock(pool_scale=_a , in_channels=_a , channels=_a )
self.blocks.append(_a )
self.add_module(str(_a ) , _a )
def __UpperCAmelCase ( self , _a ):
__a = []
for ppm in self.blocks:
__a = ppm(_a )
__a = nn.functional.interpolate(
_a , size=x.size()[2:] , mode='''bilinear''' , align_corners=self.align_corners )
ppm_outs.append(_a )
return ppm_outs
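# Shape sketch (numbers illustrative): with pool_scales (1, 2, 3, 6) and an input
# of (batch, in_channels, 32, 32), each block pools to (batch, channels, s, s) for
# s in {1, 2, 3, 6} and is bilinearly upsampled back to (batch, channels, 32, 32),
# ready to be concatenated with the original feature map along dim=1.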
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a ):
super().__init__()
__a = config
__a = config.pool_scales # e.g. (1, 2, 3, 6)
__a = in_channels
__a = config.hidden_size
__a = False
__a = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
# PSP Module
__a = UperNetPyramidPoolingModule(
self.pool_scales , self.in_channels[-1] , self.channels , align_corners=self.align_corners , )
__a = UperNetConvModule(
self.in_channels[-1] + len(self.pool_scales ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
# FPN Module
__a = nn.ModuleList()
__a = nn.ModuleList()
for in_channels in self.in_channels[:-1]: # skip the top layer
__a = UperNetConvModule(_a , self.channels , kernel_size=1 )
__a = UperNetConvModule(self.channels , self.channels , kernel_size=3 , padding=1 )
self.lateral_convs.append(_a )
self.fpn_convs.append(_a )
__a = UperNetConvModule(
len(self.in_channels ) * self.channels , self.channels , kernel_size=3 , padding=1 , )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
__a = inputs[-1]
__a = [x]
psp_outs.extend(self.psp_modules(_a ) )
__a = torch.cat(_a , dim=1 )
__a = self.bottleneck(_a )
return output
def __UpperCAmelCase ( self , _a ):
# build laterals
__a = [lateral_conv(encoder_hidden_states[i] ) for i, lateral_conv in enumerate(self.lateral_convs )]
laterals.append(self.psp_forward(_a ) )
# build top-down path
__a = len(_a )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = laterals[i - 1].shape[2:]
__a = laterals[i - 1] + nn.functional.interpolate(
laterals[i] , size=_a , mode='''bilinear''' , align_corners=self.align_corners )
# build outputs
__a = [self.fpn_convs[i](laterals[i] ) for i in range(used_backbone_levels - 1 )]
# append psp feature
fpn_outs.append(laterals[-1] )
for i in range(used_backbone_levels - 1 , 0 , -1 ):
__a = nn.functional.interpolate(
fpn_outs[i] , size=fpn_outs[0].shape[2:] , mode='''bilinear''' , align_corners=self.align_corners )
__a = torch.cat(_a , dim=1 )
__a = self.fpn_bottleneck(_a )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , _a , _a = 2 , _a = 3 , _a = 1 ):
super().__init__()
__a = config
__a = config.auxiliary_in_channels
__a = config.auxiliary_channels
__a = config.auxiliary_num_convs
__a = config.auxiliary_concat_input
__a = in_index
__a = (kernel_size // 2) * dilation
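# e.g. kernel_size 3 with dilation 1 gives padding 1, the "same"-padding pattern
# that preserves spatial size for the stride-1 convolutions built below.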
__a = []
convs.append(
UperNetConvModule(
self.in_channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
for i in range(self.num_convs - 1 ):
convs.append(
UperNetConvModule(
self.channels , self.channels , kernel_size=_a , padding=_a , dilation=_a ) )
if self.num_convs == 0:
__a = nn.Identity()
else:
__a = nn.Sequential(*_a )
if self.concat_input:
__a = UperNetConvModule(
self.in_channels + self.channels , self.channels , kernel_size=_a , padding=kernel_size // 2 )
__a = nn.Conv2d(self.channels , config.num_labels , kernel_size=1 )
def __UpperCAmelCase ( self ):
self.apply(self._init_weights )
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , nn.Conv2d ):
module.weight.data.normal_(mean=0.0 , std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
def __UpperCAmelCase ( self , _a ):
# just take the relevant feature maps
__a = encoder_hidden_states[self.in_index]
__a = self.convs(_a )
if self.concat_input:
__a = self.conv_cat(torch.cat([hidden_states, output] , dim=1 ) )
__a = self.classifier(_a )
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
__UpperCAmelCase : int = UperNetConfig
__UpperCAmelCase : Union[str, Any] = 'pixel_values'
__UpperCAmelCase : Dict = True
def __UpperCAmelCase ( self , _a ):
if isinstance(_a , _a ):
module.backbone.init_weights()
module.decode_head.init_weights()
module.auxiliary_head.init_weights()
def __UpperCAmelCase ( self ):
self.backbone.init_weights()
self.decode_head.init_weights()
self.auxiliary_head.init_weights()
def __UpperCAmelCase ( self , _a , _a=False ):
if isinstance(_a , _a ):
__a = value
lowercase_ = r"\n Parameters:\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n config ([`UperNetConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n"
lowercase_ = r"\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using\n [`AutoImageProcessor`]. See [`SegformerImageProcessor.__call__`] for details.\n output_attentions (`bool`, *optional*):\n Whether or not to return the attentions tensors of all attention layers in case the backbone has them. See\n `attentions` under returned tensors for more detail.\n output_hidden_states (`bool`, *optional*):\n Whether or not to return the hidden states of all layers of the backbone. See `hidden_states` under\n returned tensors for more detail.\n return_dict (`bool`, *optional*):\n Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.\n"
@add_start_docstrings(
'UperNet framework leveraging any vision backbone e.g. for ADE20k, CityScapes.' , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self , _a ):
super().__init__(_a )
__a = AutoBackbone.from_config(config.backbone_config )
# Semantic segmentation head(s)
__a = UperNetHead(_a , in_channels=self.backbone.channels )
__a = UperNetFCNHead(_a ) if config.use_auxiliary_head else None
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(UPERNET_INPUTS_DOCSTRING.format('''batch_size, sequence_length''' ) )
@replace_return_docstrings(output_type=_a , config_class=_CONFIG_FOR_DOC )
def __UpperCAmelCase ( self , _a = None , _a = None , _a = None , _a = None , _a = None , ):
__a = return_dict if return_dict is not None else self.config.use_return_dict
__a = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
__a = output_attentions if output_attentions is not None else self.config.output_attentions
__a = self.backbone.forward_with_filtered_kwargs(
_a , output_hidden_states=_a , output_attentions=_a )
__a = outputs.feature_maps
__a = self.decode_head(_a )
__a = nn.functional.interpolate(_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if self.auxiliary_head is not None:
__a = self.auxiliary_head(_a )
__a = nn.functional.interpolate(
_a , size=pixel_values.shape[2:] , mode='''bilinear''' , align_corners=_a )
__a = None
if labels is not None:
if self.config.num_labels == 1:
raise ValueError('''The number of labels should be greater than one''' )
else:
# compute weighted loss
__a = CrossEntropyLoss(ignore_index=self.config.loss_ignore_index )
__a = loss_fct(_a , _a )
__a = loss_fct(_a , _a )
__a = main_loss + self.config.auxiliary_loss_weight * auxiliary_loss
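# e.g. (illustrative): with auxiliary_loss_weight = 0.4, a main loss of 1.0 and an
# auxiliary loss of 0.5 combine to 1.0 + 0.4 * 0.5 = 1.2.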
if not return_dict:
if output_hidden_states:
__a = (logits,) + outputs[1:]
else:
__a = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return SemanticSegmenterOutput(
loss=_a , logits=_a , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 45 |
'''simple docstring'''
import json
import os
from pathlib import Path
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple, Union
import sentencepiece
from ...tokenization_utils import BatchEncoding, PreTrainedTokenizer
from ...utils import logging
lowercase_ = logging.get_logger(__name__)
lowercase_ = "▁"
lowercase_ = {
"vocab_file": "vocab.json",
"spm_file": "sentencepiece.bpe.model",
"tokenizer_config_file": "tokenizer_config.json",
}
lowercase_ = {
"vocab_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/vocab.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/vocab.json",
},
"spm_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/sentencepiece.bpe.model",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/sentencepiece.bpe.model",
},
"tokenizer_config_file": {
"facebook/m2m100_418M": "https://huggingface.co/facebook/m2m100_418M/resolve/main/tokenizer_config.json",
"facebook/m2m100_1.2B": "https://huggingface.co/facebook/m2m100_1.2B/resolve/main/tokenizer_config.json",
},
}
lowercase_ = {
"facebook/m2m100_418M": 1_024,
}
# fmt: off
lowercase_ = {
"m2m100": ["af", "am", "ar", "ast", "az", "ba", "be", "bg", "bn", "br", "bs", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "fa", "ff", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "ht", "hu", "hy", "id", "ig", "ilo", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "lb", "lg", "ln", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "ns", "oc", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sd", "si", "sk", "sl", "so", "sq", "sr", "ss", "su", "sv", "sw", "ta", "th", "tl", "tn", "tr", "uk", "ur", "uz", "vi", "wo", "xh", "yi", "yo", "zh", "zu"],
"wmt21": ["en", "ha", "is", "ja", "cs", "ru", "zh", "de"]
}
class __A ( A ):
'''simple docstring'''
__lowerCamelCase : Union[str, Any] = VOCAB_FILES_NAMES
__lowerCamelCase : Tuple = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
__lowerCamelCase : Tuple = PRETRAINED_VOCAB_FILES_MAP
__lowerCamelCase : Dict = ['input_ids', 'attention_mask']
__lowerCamelCase : List[int] = []
__lowerCamelCase : List[int] = []
def __init__(self , A , A , A=None , A=None , A="<s>" , A="</s>" , A="</s>" , A="<pad>" , A="<unk>" , A="m2m100" , A = None , A=8 , **A , ) -> None:
"""simple docstring"""
_a = {} if sp_model_kwargs is None else sp_model_kwargs
_a = language_codes
_a = FAIRSEQ_LANGUAGE_CODES[language_codes]
_a = {lang_code: f'''__{lang_code}__''' for lang_code in fairseq_language_code}
_a = kwargs.get('''additional_special_tokens''' , [] )
kwargs["additional_special_tokens"] += [
self.get_lang_token(A )
for lang_code in fairseq_language_code
if self.get_lang_token(A ) not in kwargs["additional_special_tokens"]
]
super().__init__(
src_lang=A , tgt_lang=A , bos_token=A , eos_token=A , sep_token=A , unk_token=A , pad_token=A , language_codes=A , sp_model_kwargs=self.sp_model_kwargs , num_madeup_words=A , **A , )
_a = vocab_file
_a = load_json(A )
_a = {v: k for k, v in self.encoder.items()}
_a = spm_file
_a = load_spm(A , self.sp_model_kwargs )
_a = len(self.encoder )
_a = {
self.get_lang_token(A ): self.encoder_size + i for i, lang_code in enumerate(A )
}
_a = {lang_code: self.encoder_size + i for i, lang_code in enumerate(A )}
_a = {v: k for k, v in self.lang_token_to_id.items()}
_a = src_lang if src_lang is not None else '''en'''
_a = tgt_lang
_a = self.get_lang_id(self._src_lang )
self.set_src_lang_special_tokens(self._src_lang )
_a = num_madeup_words
@property
def a__ (self ) -> int:
"""simple docstring"""
return len(self.encoder ) + len(self.lang_token_to_id )
@property
def a__ (self ) -> str:
"""simple docstring"""
return self._src_lang
@src_lang.setter
def a__ (self , A ) -> None:
"""simple docstring"""
_a = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def a__ (self , A ) -> List[str]:
"""simple docstring"""
return self.sp_model.encode(A , out_type=A )
def a__ (self , A ) -> Union[str, Any]:
"""simple docstring"""
if token in self.lang_token_to_id:
return self.lang_token_to_id[token]
return self.encoder.get(A , self.encoder[self.unk_token] )
def a__ (self , A ) -> str:
"""simple docstring"""
if index in self.id_to_lang_token:
return self.id_to_lang_token[index]
return self.decoder.get(A , self.unk_token )
def a__ (self , A ) -> Dict:
"""simple docstring"""
_a = []
_a = ''''''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A ) + token
_a = []
else:
current_sub_tokens.append(A )
out_string += self.sp_model.decode(A )
return out_string.strip()
def a__ (self , A , A = None , A = False ) -> List[int]:
"""simple docstring"""
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A , token_ids_a=A , already_has_special_tokens=A )
_a = [1] * len(self.prefix_tokens )
_a = [1] * len(self.suffix_tokens )
if token_ids_a is None:
return prefix_ones + ([0] * len(A )) + suffix_ones
return prefix_ones + ([0] * len(A )) + ([0] * len(A )) + suffix_ones
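# e.g. (illustrative): one prefix token, three sequence tokens and one suffix
# token yield the mask [1, 0, 0, 0, 1], where 1 marks special positions.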
def a__ (self , A , A = None ) -> List[int]:
"""simple docstring"""
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
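# e.g. (illustrative): with src_lang "en", prefix_tokens holds the id of the
# "__en__" language token and suffix_tokens holds the eos id, so a single
# sequence is encoded as [__en__] + token_ids + [</s>].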
def a__ (self ) -> Dict:
"""simple docstring"""
_a = {self.convert_ids_to_tokens(A ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__(self ) -> Dict:
"""simple docstring"""
_a = self.__dict__.copy()
_a = None
return state
def __setstate__(self , A ) -> None:
"""simple docstring"""
_a = d
# for backward compatibility
if not hasattr(self , '''sp_model_kwargs''' ):
_a = {}
_a = load_spm(self.spm_file , self.sp_model_kwargs )
def a__ (self , A , A = None ) -> Tuple[str]:
"""simple docstring"""
_a = Path(A )
if not save_dir.is_dir():
raise OSError(f'''{save_directory} should be a directory''' )
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''vocab_file''']
)
_a = save_dir / (
(filename_prefix + '''-''' if filename_prefix else '''''') + self.vocab_files_names['''spm_file''']
)
save_json(self.encoder , A )
if os.path.abspath(self.spm_file ) != os.path.abspath(A ) and os.path.isfile(self.spm_file ):
copyfile(self.spm_file , A )
elif not os.path.isfile(self.spm_file ):
with open(A , '''wb''' ) as fi:
_a = self.sp_model.serialized_model_proto()
fi.write(A )
return (str(A ), str(A ))
def a__ (self , A , A = "en" , A = None , A = "ro" , **A , ) -> BatchEncoding:
"""simple docstring"""
_a = src_lang
_a = tgt_lang
self.set_src_lang_special_tokens(self.src_lang )
return super().prepare_seqaseq_batch(A , A , **A )
def a__ (self , A , A , A , **A ) -> Union[str, Any]:
"""simple docstring"""
if src_lang is None or tgt_lang is None:
raise ValueError('''Translation requires a `src_lang` and a `tgt_lang` for this model''' )
_a = src_lang
_a = self(A , add_special_tokens=A , **A )
_a = self.get_lang_id(A )
_a = tgt_lang_id
return inputs
def a__ (self ) -> Optional[Any]:
"""simple docstring"""
self.set_src_lang_special_tokens(self.src_lang )
def a__ (self ) -> Tuple:
"""simple docstring"""
self.set_tgt_lang_special_tokens(self.tgt_lang )
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> None:
"""simple docstring"""
_a = self.get_lang_token(A )
_a = self.lang_token_to_id[lang_token]
_a = [self.cur_lang_id]
_a = [self.eos_token_id]
def a__ (self , A ) -> str:
"""simple docstring"""
return self.lang_code_to_token[lang]
def a__ (self , A ) -> int:
"""simple docstring"""
_a = self.get_lang_token(A )
return self.lang_token_to_id[lang_token]
def lowerCAmelCase (__A , __A):
"""simple docstring"""
_a = sentencepiece.SentencePieceProcessor(**__A)
spm.Load(str(__A))
return spm
def lowerCAmelCase (__A):
"""simple docstring"""
with open(__A , '''r''') as f:
return json.load(__A)
def lowerCAmelCase (__A , __A):
"""simple docstring"""
with open(__A , '''w''') as f:
json.dump(__A , __A , indent=2)
| 211 | 0 |
"""simple docstring"""
from PIL import Image
def lowerCamelCase ( _UpperCamelCase : Image , _UpperCamelCase : int ) -> Image:
'''simple docstring'''
__UpperCAmelCase : List[str] = (2_5_9 * (level + 2_5_5)) / (2_5_5 * (2_5_9 - level))
def contrast(_UpperCamelCase : int ) -> int:
return int(1_2_8 + factor * (c - 1_2_8) )
return img.point(_UpperCamelCase )
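# Worked example (values illustrative): at level = 170 the factor is
# (259 * (170 + 255)) / (255 * (259 - 170)) = 110075 / 22695 ~ 4.85, so a pixel
# value c = 150 maps to int(128 + 4.85 * (150 - 128)) = 234 -- values are pushed
# away from the 128 midpoint, raising contrast.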
if __name__ == "__main__":
# Load image
with Image.open('image_data/lena.jpg') as img:
# Change contrast to 170
UpperCAmelCase : Any = change_contrast(img, 170)
cont_img.save('image_data/lena_high_contrast.png', format='png')
| 320 |
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class lowerCamelCase__ :
"""simple docstring"""
@staticmethod
def lowerCamelCase__ ( *UpperCamelCase : Optional[Any] , **UpperCamelCase : Dict ):
'''simple docstring'''
pass
def lowerCamelCase ( _UpperCamelCase : Image ) -> str:
'''simple docstring'''
__UpperCAmelCase : Tuple = hashlib.md5(image.tobytes() )
return m.hexdigest()[:1_0]
def lowerCamelCase ( _UpperCamelCase : Image ) -> Dict:
'''simple docstring'''
__UpperCAmelCase : Tuple = np.array(_UpperCamelCase )
__UpperCAmelCase : List[Any] = npimg.shape
return {"hash": hashimage(_UpperCamelCase ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class lowerCamelCase__ ( unittest.TestCase ):
"""simple docstring"""
__a = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
__a = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def lowerCamelCase__ ( self : Tuple , UpperCamelCase : str , UpperCamelCase : Optional[Any] , UpperCamelCase : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Optional[Any] = MaskGenerationPipeline(model=UpperCamelCase , image_processor=UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def lowerCamelCase__ ( self : Optional[int] , UpperCamelCase : Dict , UpperCamelCase : List[Any] ):
'''simple docstring'''
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def lowerCamelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@slow
@require_torch
def lowerCamelCase__ ( self : Tuple ):
'''simple docstring'''
__UpperCAmelCase : Tuple = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
__UpperCAmelCase : Any = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : int = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.021},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (480, 640)}, """scores""": 0.9967},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (480, 640)}, """scores""": 0.993},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (480, 640)}, """scores""": 0.9909},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (480, 640)}, """scores""": 0.9879},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (480, 640)}, """scores""": 0.9834},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (480, 640)}, """scores""": 0.9716},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (480, 640)}, """scores""": 0.9612},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (480, 640)}, """scores""": 0.9599},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (480, 640)}, """scores""": 0.9552},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (480, 640)}, """scores""": 0.9532},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (480, 640)}, """scores""": 0.9516},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (480, 640)}, """scores""": 0.9499},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (480, 640)}, """scores""": 0.9483},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (480, 640)}, """scores""": 0.9464},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (480, 640)}, """scores""": 0.943},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (480, 640)}, """scores""": 0.9408},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (480, 640)}, """scores""": 0.9335},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (480, 640)}, """scores""": 0.9326},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (480, 640)}, """scores""": 0.9262},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (480, 640)}, """scores""": 0.8999},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (480, 640)}, """scores""": 0.8986},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (480, 640)}, """scores""": 0.8984},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (480, 640)}, """scores""": 0.8873},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (480, 640)}, """scores""": 0.8871}
] , )
# fmt: on
@require_torch
@slow
def lowerCamelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__UpperCAmelCase : Any = """facebook/sam-vit-huge"""
__UpperCAmelCase : str = pipeline("""mask-generation""" , model=UpperCamelCase )
__UpperCAmelCase : int = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=256 )
# Shortening by hashing
__UpperCAmelCase : Dict = []
for i, o in enumerate(outputs["""masks"""] ):
new_output += [{"mask": mask_to_test_readable(UpperCamelCase ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (480, 640)}, """scores""": 1.0444},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (480, 640)}, """scores""": 1.0210},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (480, 640)}, """scores""": 1.0167},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (480, 640)}, """scores""": 1.0132},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (480, 640)}, """scores""": 1.0053},
] , )
| 320 | 1 |
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class A__ ( unittest.TestCase ):
__UpperCamelCase : Dict = inspect.getfile(accelerate.test_utils )
__UpperCamelCase : Dict = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_cli.py"] )
__UpperCamelCase : List[str] = ["accelerate", "launch"]
__UpperCamelCase : List[str] = Path.home() / ".cache/huggingface/accelerate"
__UpperCamelCase : Optional[int] = "default_config.yaml"
__UpperCamelCase : Optional[Any] = config_folder / config_file
__UpperCamelCase : int = config_folder / "_default_config.yaml"
__UpperCamelCase : Union[str, Any] = Path("tests/test_configs" )
@classmethod
def __UpperCAmelCase ( cls :List[Any] ) -> List[str]:
'''simple docstring'''
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def __UpperCAmelCase ( cls :int ) -> Union[str, Any]:
'''simple docstring'''
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def __UpperCAmelCase ( self :List[Any] ) -> Any:
'''simple docstring'''
_a : Dict =self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
for config in sorted(self.test_config_path.glob("""**/*.yaml""" ) ):
with self.subTest(config_file=SCREAMING_SNAKE_CASE ):
execute_subprocess_async(
self.base_cmd + ["""--config_file""", str(SCREAMING_SNAKE_CASE ), self.test_file_path] , env=os.environ.copy() )
def __UpperCAmelCase ( self :Any ) -> int:
'''simple docstring'''
execute_subprocess_async(["""accelerate""", """test"""] , env=os.environ.copy() )
class A__ ( unittest.TestCase ):
__UpperCamelCase : List[str] = "test-tpu"
__UpperCamelCase : Dict = "us-central1-a"
__UpperCamelCase : Any = "ls"
__UpperCamelCase : int = ["accelerate", "tpu-config"]
__UpperCamelCase : str = "cd /usr/share"
__UpperCamelCase : str = "tests/test_samples/test_command_file.sh"
__UpperCamelCase : List[Any] = "Running gcloud compute tpus tpu-vm ssh"
def __UpperCAmelCase ( self :Tuple ) -> int:
'''simple docstring'''
_a : Optional[int] =run_command(
self.cmd
+ ["""--command""", self.command, """--tpu_zone""", self.tpu_zone, """--tpu_name""", self.tpu_name, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Tuple ) -> List[str]:
'''simple docstring'''
_a : Optional[Any] =run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command""",
self.command,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :List[str] ) -> Optional[int]:
'''simple docstring'''
_a : str =run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : str =run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--command""", self.command, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> List[Any]:
'''simple docstring'''
_a : Optional[Any] =run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--command""",
self.command,
"""--command""",
"""echo \"Hello World\"""",
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo \"Hello World\" --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :List[Any] ) -> Union[str, Any]:
'''simple docstring'''
_a : str =run_command(
self.cmd
+ ["""--config_file""", """tests/test_configs/latest.yaml""", """--command_file""", self.command_file, """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> Optional[Any]:
'''simple docstring'''
_a : Any =run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/0_12_0.yaml""",
"""--command_file""",
self.command_file,
"""--tpu_zone""",
self.tpu_zone,
"""--tpu_name""",
self.tpu_name,
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo \"hello world\"; echo \"this is a second command\" --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Union[str, Any] ) -> int:
'''simple docstring'''
_a : Any =run_command(
self.cmd + ["""--config_file""", """tests/test_configs/latest.yaml""", """--install_accelerate""", """--debug"""] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo \"hello world\"; echo \"this is a second command\" --worker all" , SCREAMING_SNAKE_CASE , )
def __UpperCAmelCase ( self :Tuple ) -> Dict:
'''simple docstring'''
_a : Any =run_command(
self.cmd
+ [
"""--config_file""",
"""tests/test_configs/latest.yaml""",
"""--install_accelerate""",
"""--accelerate_version""",
"""12.0.0""",
"""--debug""",
] , return_stdout=SCREAMING_SNAKE_CASE , )
self.assertIn(
f"{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo \"hello world\"; echo \"this is a second command\" --worker all" , SCREAMING_SNAKE_CASE , )
| 276 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
A__: str = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
A__: Tuple = ['''GPTSw3Tokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_gpt_sw3 import GPTSw3Tokenizer
else:
import sys
A__: str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 276 | 1 |
"""simple docstring"""
import argparse
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
lowerCAmelCase_ : Union[str, Any] = 1_6
lowerCAmelCase_ : Optional[Any] = 3_2
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = 16 ):
'''simple docstring'''
UpperCAmelCase = AutoTokenizer.from_pretrained("""bert-base-cased""" )
UpperCAmelCase = load_dataset("""glue""" , """mrpc""" )
def tokenize_function(lowerCAmelCase ):
# max_length=None => use the model max length (it's actually the default)
UpperCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase , max_length=lowerCAmelCase )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
# starting with the main process first:
with accelerator.main_process_first():
UpperCAmelCase = datasets.map(
lowerCAmelCase , batched=lowerCAmelCase , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
# We also rename the 'label' column to 'labels', the name that models in the
# transformers library expect for labels
UpperCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(lowerCAmelCase ):
# On TPU it's best to pad everything to the same length or training will be very slow.
UpperCAmelCase = 128 if accelerator.distributed_type == DistributedType.TPU else None
# When using mixed precision we want round multiples of 8/16
if accelerator.mixed_precision == "fp8":
UpperCAmelCase = 16
elif accelerator.mixed_precision != "no":
UpperCAmelCase = 8
else:
UpperCAmelCase = None
return tokenizer.pad(
lowerCAmelCase , padding="""longest""" , max_length=lowerCAmelCase , pad_to_multiple_of=lowerCAmelCase , return_tensors="""pt""" , )
# Instantiate dataloaders.
UpperCAmelCase = DataLoader(
tokenized_datasets["""train"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase , drop_last=lowerCAmelCase )
UpperCAmelCase = DataLoader(
tokenized_datasets["""validation"""] , shuffle=lowerCAmelCase , collate_fn=lowerCAmelCase , batch_size=lowerCAmelCase , drop_last=(accelerator.mixed_precision == """fp8""") , )
return train_dataloader, eval_dataloader
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
# Initialize accelerator
UpperCAmelCase = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
UpperCAmelCase = config["""lr"""]
UpperCAmelCase = int(config["""num_epochs"""] )
UpperCAmelCase = int(config["""seed"""] )
UpperCAmelCase = int(config["""batch_size"""] )
UpperCAmelCase = evaluate.load("""glue""" , """mrpc""" )
# If the batch size is too big we use gradient accumulation
UpperCAmelCase = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
UpperCAmelCase = batch_size // MAX_GPU_BATCH_SIZE
UpperCAmelCase = MAX_GPU_BATCH_SIZE
set_seed(lowerCAmelCase )
UpperCAmelCase , UpperCAmelCase = get_dataloaders(lowerCAmelCase , lowerCAmelCase )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained("""bert-base-cased""" , return_dict=lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
UpperCAmelCase = model.to(accelerator.device )
# Instantiate optimizer
UpperCAmelCase = AdamW(params=model.parameters() , lr=lowerCAmelCase )
# Instantiate scheduler
UpperCAmelCase = get_linear_schedule_with_warmup(
optimizer=lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase , UpperCAmelCase = accelerator.prepare(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# Now we train the model
for epoch in range(lowerCAmelCase ):
model.train()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs.loss
UpperCAmelCase = loss / gradient_accumulation_steps
accelerator.backward(lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
for step, batch in enumerate(lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
UpperCAmelCase = model(**lowerCAmelCase )
UpperCAmelCase = outputs.logits.argmax(dim=-1 )
UpperCAmelCase , UpperCAmelCase = accelerator.gather_for_metrics((predictions, batch["""labels"""]) )
metric.add_batch(
predictions=lowerCAmelCase , references=lowerCAmelCase , )
UpperCAmelCase = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , lowerCAmelCase )
def _lowerCAmelCase ( ):
'''simple docstring'''
UpperCAmelCase = argparse.ArgumentParser(description="""Simple example of training script.""" )
parser.add_argument(
"""--mixed_precision""" , type=lowerCAmelCase , default=lowerCAmelCase , choices=["""no""", """fp16""", """bf16""", """fp8"""] , help="""Whether to use mixed precision. Choose"""
"""between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."""
"""and an Nvidia Ampere GPU.""" , )
parser.add_argument("""--cpu""" , action="""store_true""" , help="""If passed, will train on the CPU.""" )
UpperCAmelCase = parser.parse_args()
UpperCAmelCase = {"""lr""": 2e-5, """num_epochs""": 3, """seed""": 42, """batch_size""": 16}
training_function(lowerCAmelCase , lowerCAmelCase )
if __name__ == "__main__":
main()
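A self-contained sketch of the gradient-accumulation pattern the training loop above relies on: each micro-batch loss is scaled by 1/k and the optimizer steps every k micro-batches, mirroring the script's `step % gradient_accumulation_steps == 0` condition. Shapes and hyper-parameters are illustrative.

import torch

model = torch.nn.Linear(4, 1)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
k = 4  # gradient_accumulation_steps

for step in range(8):
    x, y = torch.randn(2, 4), torch.randn(2, 1)
    loss = torch.nn.functional.mse_loss(model(x), y) / k  # scale so accumulated grads match one big batch
    loss.backward()  # gradients accumulate across micro-batches until zero_grad()
    if step % k == 0:
        optimizer.step()
        optimizer.zero_grad()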
| 248 |
"""simple docstring"""
import math
import random
from typing import Any
from .hill_climbing import SearchProblem
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = math.inf , lowerCAmelCase = -math.inf , lowerCAmelCase = math.inf , lowerCAmelCase = -math.inf , lowerCAmelCase = False , lowerCAmelCase = 100 , lowerCAmelCase = 0.01 , lowerCAmelCase = 1 , ):
'''simple docstring'''
UpperCAmelCase = False
UpperCAmelCase = search_prob
UpperCAmelCase = start_temperate
UpperCAmelCase = []
UpperCAmelCase = 0
UpperCAmelCase = None
while not search_end:
UpperCAmelCase = current_state.score()
if best_state is None or current_score > best_state.score():
UpperCAmelCase = current_state
scores.append(lowerCAmelCase )
iterations += 1
UpperCAmelCase = None
UpperCAmelCase = current_state.get_neighbors()
while (
next_state is None and neighbors
): # till we do not find a neighbor that we can move to
UpperCAmelCase = random.randint(0 , len(lowerCAmelCase ) - 1 ) # picking a random neighbor
UpperCAmelCase = neighbors.pop(lowerCAmelCase )
UpperCAmelCase = picked_neighbor.score() - current_score
if (
picked_neighbor.x > max_x
or picked_neighbor.x < min_x
or picked_neighbor.y > max_y
or picked_neighbor.y < min_y
):
continue # neighbor outside our bounds
if not find_max:
UpperCAmelCase = change * -1 # in case we are finding minimum
if change > 0: # improves the solution
UpperCAmelCase = picked_neighbor
else:
UpperCAmelCase = (math.e) ** (
change / current_temp
) # probability generation function
if random.random() < probability: # random number within probability
UpperCAmelCase = picked_neighbor
UpperCAmelCase = current_temp - (current_temp * rate_of_decrease)
if current_temp < threshold_temp or next_state is None:
# temperature below threshold, or could not find a suitable neighbor
UpperCAmelCase = True
else:
UpperCAmelCase = next_state
if visualization:
from matplotlib import pyplot as plt
plt.plot(range(lowerCAmelCase ) , lowerCAmelCase )
plt.xlabel("""Iterations""" )
plt.ylabel("""Function values""" )
plt.show()
return best_state
if __name__ == "__main__":
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (x**2) + (y**2)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : List[str] = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[str] = simulated_annealing(
prob, find_max=False, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The minimum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
# starting the problem with initial coordinates (12, 47)
lowerCAmelCase_ : int = SearchProblem(x=1_2, y=4_7, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : Optional[Any] = simulated_annealing(
prob, find_max=True, max_x=1_0_0, min_x=5, max_y=5_0, min_y=-5, visualization=True
)
print(
'''The maximum score for f(x, y) = x^2 + y^2 with the domain 100 > x > 5 '''
    F'and 50 > y > - 5 found via simulated annealing: {local_min.score()}'
)
def _lowerCAmelCase ( lowerCAmelCase , lowerCAmelCase ):
'''simple docstring'''
return (3 * x**2) - (6 * y)
lowerCAmelCase_ : Dict = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=False, visualization=True)
print(
    '''The minimum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
lowerCAmelCase_ : List[str] = SearchProblem(x=3, y=4, step_size=1, function_to_optimize=test_fa)
lowerCAmelCase_ : List[Any] = simulated_annealing(prob, find_max=True, visualization=True)
print(
    '''The maximum score for f(x, y) = 3*x^2 - 6*y found via simulated annealing: '''
F'{local_min.score()}'
)
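The core of the loop above is the Metropolis acceptance rule; a standalone sketch: a strictly improving move is always taken, while a worsening move (change < 0 when maximizing) is taken with probability e^(change / T), which shrinks as the temperature decays.

import math
import random

def accept(change, temperature):
    if change > 0:  # strictly better: always accept
        return True
    return random.random() < math.e ** (change / temperature)  # worse: accept with prob e^(change/T)

print(accept(-1.0, 100.0))  # usually True at high temperature
print(accept(-1.0, 0.01))   # almost always False once cooled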
| 248 | 1 |
'''simple docstring'''
import json
import logging
import os
import sys
from time import time
from unittest.mock import patch
from transformers.testing_utils import TestCasePlus, require_torch_tpu
logging.basicConfig(level=logging.DEBUG)
lowerCAmelCase :List[str] = logging.getLogger()
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : Dict = {}
__magic_name__ : Dict = os.path.join(lowerCAmelCase , 'all_results.json' )
if os.path.exists(lowerCAmelCase ):
with open(lowerCAmelCase , 'r' ) as f:
__magic_name__ : List[Any] = json.load(lowerCAmelCase )
else:
raise ValueError(f'can\'t find {path}' )
return results
lowerCAmelCase :Union[str, Any] = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
@require_torch_tpu
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
import xla_spawn
__magic_name__ : Any = self.get_auto_remove_tmp_dir()
__magic_name__ : int = F'\n ./examples/pytorch/text-classification/run_glue.py\n --num_cores=8\n ./examples/pytorch/text-classification/run_glue.py\n --model_name_or_path distilbert-base-uncased\n --output_dir {tmp_dir}\n --overwrite_output_dir\n --train_file ./tests/fixtures/tests_samples/MRPC/train.csv\n --validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv\n --do_train\n --do_eval\n --debug tpu_metrics_debug\n --per_device_train_batch_size=2\n --per_device_eval_batch_size=1\n --learning_rate=1e-4\n --max_steps=10\n --warmup_steps=2\n --seed=42\n --max_seq_length=128\n '.split()
with patch.object(_A , 'argv' , _A ):
__magic_name__ : Tuple = time()
xla_spawn.main()
__magic_name__ : str = time()
__magic_name__ : List[Any] = get_results(_A )
self.assertGreaterEqual(result['eval_accuracy'] , 0.75 )
# Assert that the script takes less than 500 seconds to make sure it doesn't hang.
self.assertLess(end - start , 500 )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import xla_spawn
__magic_name__ : Dict = '\n ./tests/test_trainer_tpu.py\n --num_cores=8\n ./tests/test_trainer_tpu.py\n '.split()
with patch.object(_A , 'argv' , _A ):
xla_spawn.main() | 331 |
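Both tests above drive xla_spawn.main() by faking the command line; a minimal sketch of that patch.object(sys, "argv", ...) pattern with a toy main:

import sys
from unittest.mock import patch

def main():
    print("args:", sys.argv[1:])

testargs = ["xla_spawn.py", "--num_cores=8", "run_glue.py"]
with patch.object(sys, "argv", testargs):
    main()  # sees the injected argv instead of the real command line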
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer less than b_estimate
__magic_name__ : int
# the largest integer less than b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
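A quick cross-check of the identity the search above exploits: an m-by-n grid contains T(m) * T(n) rectangles, where T(k) = k(k+1)/2, because a rectangle is fixed by choosing 2 of the m+1 horizontal and 2 of the n+1 vertical grid lines.

def rectangles(m, n):
    triangle = lambda k: k * (k + 1) // 2  # T(k) = C(k+1, 2)
    return triangle(m) * triangle(n)

assert rectangles(3, 2) == 18  # the worked example from Project Euler problem 85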
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_a : int = "src/transformers"
# This is to make sure the transformers module imported is the one in the repo.
_a : Union[str, Any] = direct_transformers_import(PATH_TO_TRANSFORMERS)
_a : Union[str, Any] = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_a : Union[str, Any] = {
# used to compute the property `self.chunk_length`
"EncodecConfig": ["overlap"],
# used as `self.bert_model = BertModel(config, ...)`
"DPRConfig": True,
# not used in modeling files, but it's an important information
"FSMTConfig": ["langs"],
# used internally in the configuration class file
"GPTNeoConfig": ["attention_types"],
# used internally in the configuration class file
"EsmConfig": ["is_folding_model"],
# used during training (despite we don't have training script for these models yet)
"Mask2FormerConfig": ["ignore_value"],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"OneFormerConfig": ["ignore_value", "norm"],
# used during preprocessing and collation, see `collating_graphormer.py`
"GraphormerConfig": ["spatial_pos_max"],
# used internally in the configuration class file
"T5Config": ["feed_forward_proj"],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"MT5Config": ["feed_forward_proj", "tokenizer_class"],
"UMT5Config": ["feed_forward_proj", "tokenizer_class"],
# used internally in the configuration class file
"LongT5Config": ["feed_forward_proj"],
# used internally in the configuration class file
"SwitchTransformersConfig": ["feed_forward_proj"],
# having default values other than `1e-5` - we can't fix them without breaking
"BioGptConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"GLPNConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"SegformerConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"CvtConfig": ["layer_norm_eps"],
# having default values other than `1e-5` - we can't fix them without breaking
"PerceiverConfig": ["layer_norm_eps"],
# used internally to calculate the feature size
"InformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"TimeSeriesTransformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate the feature size
"AutoformerConfig": ["num_static_real_features", "num_time_features"],
# used internally to calculate `mlp_dim`
"SamVisionConfig": ["mlp_ratio"],
# For (head) training, but so far not implemented
"ClapAudioConfig": ["num_classes"],
# Not used, but providing useful information to users
"SpeechT5HifiGanConfig": ["sampling_rate"],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'CLIPSegConfig': True,
'DeformableDetrConfig': True,
'DetaConfig': True,
'DinatConfig': True,
'DonutSwinConfig': True,
'EfficientFormerConfig': True,
'FSMTConfig': True,
'JukeboxConfig': True,
'LayoutLMv2Config': True,
'MaskFormerSwinConfig': True,
'MT5Config': True,
'NatConfig': True,
'OneFormerConfig': True,
'PerceiverConfig': True,
'RagConfig': True,
'SpeechT5Config': True,
'SwinConfig': True,
'Swin2SRConfig': True,
'Swinv2Config': True,
'SwitchTransformersConfig': True,
'TableTransformerConfig': True,
'TapasConfig': True,
'TransfoXLConfig': True,
'UniSpeechConfig': True,
'UniSpeechSatConfig': True,
'WavLMConfig': True,
'WhisperConfig': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'JukeboxPriorConfig': True,
# TODO: @Younes (for `is_decoder`)
'Pix2StructTextConfig': True,
}
)
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[Any] ,_lowerCamelCase : Optional[int] ,_lowerCamelCase : str ,_lowerCamelCase : Dict ) -> Union[str, Any]:
_lowerCAmelCase : Tuple = False
for attribute in attributes:
for modeling_source in source_strings:
# check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
if (
f"config.{attribute}" in modeling_source
or f"getattr(config, \"{attribute}\"" in modeling_source
or f"getattr(self.config, \"{attribute}\"" in modeling_source
):
_lowerCAmelCase : Optional[Any] = True
# Deal with multi-line cases
elif (
re.search(
rf"getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*\"{attribute}\"" ,__A ,)
is not None
):
_lowerCAmelCase : Tuple = True
# `SequenceSummary` is called with `SequenceSummary(config)`
elif attribute in [
"summary_type",
"summary_use_proj",
"summary_activation",
"summary_last_dropout",
"summary_proj_to_labels",
"summary_first_dropout",
]:
if "SequenceSummary" in modeling_source:
_lowerCAmelCase : Optional[int] = True
if attribute_used:
break
if attribute_used:
break
# common and important attributes, even if they do not always appear in the modeling files
_lowerCAmelCase : Optional[int] = [
"""bos_index""",
"""eos_index""",
"""pad_index""",
"""unk_index""",
"""mask_index""",
"""image_size""",
"""use_cache""",
"""out_features""",
"""out_indices""",
]
_lowerCAmelCase : Dict = ["""encoder_no_repeat_ngram_size"""]
# Special cases to be allowed
_lowerCAmelCase : List[str] = True
if not attribute_used:
_lowerCAmelCase : List[Any] = False
for attribute in attributes:
# Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
if attribute in ["is_encoder_decoder"] and default_value is True:
_lowerCAmelCase : Tuple = True
elif attribute in ["tie_word_embeddings"] and default_value is False:
_lowerCAmelCase : Optional[Any] = True
# Allow cases without checking the default value in the configuration class
elif attribute in attributes_to_allow + attributes_used_in_generation:
_lowerCAmelCase : Union[str, Any] = True
elif attribute.endswith("""_token_id""" ):
_lowerCAmelCase : Union[str, Any] = True
# configuration class specific cases
if not case_allowed:
_lowerCAmelCase : Tuple = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__ ,[] )
_lowerCAmelCase : int = allowed_cases is True or attribute in allowed_cases
return attribute_used or case_allowed
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : Tuple ) -> List[Any]:
_lowerCAmelCase : List[Any] = dict(inspect.signature(config_class.__init__ ).parameters )
_lowerCAmelCase : str = [x for x in list(signature.keys() ) if x not in ["""self""", """kwargs"""]]
_lowerCAmelCase : Dict = [signature[param].default for param in parameter_names]
# If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
# as one variant is used, the test should pass
_lowerCAmelCase : str = {}
if len(config_class.attribute_map ) > 0:
_lowerCAmelCase : Union[str, Any] = {v: k for k, v in config_class.attribute_map.items()}
# Get the path to modeling source files
_lowerCAmelCase : Optional[int] = inspect.getsourcefile(__A )
_lowerCAmelCase : Optional[int] = os.path.dirname(__A )
# Let's check against all frameworks: as long as one framework uses an attribute, we are good.
_lowerCAmelCase : Dict = [os.path.join(__A ,__A ) for fn in os.listdir(__A ) if fn.startswith("""modeling_""" )]
# Get the source code strings
_lowerCAmelCase : List[Any] = []
for path in modeling_paths:
if os.path.isfile(__A ):
with open(__A ) as fp:
modeling_sources.append(fp.read() )
_lowerCAmelCase : Any = []
for config_param, default_value in zip(__A ,__A ):
# `attributes` here is all the variant names for `config_param`
_lowerCAmelCase : List[str] = [config_param]
# some configuration classes have non-empty `attribute_map`, and both names could be used in the
# corresponding modeling files. As long as one of them appears, it is fine.
if config_param in reversed_attribute_map:
attributes.append(reversed_attribute_map[config_param] )
if not check_attribute_being_used(__A ,__A ,__A ,__A ):
unused_attributes.append(attributes[0] )
return sorted(__A )
def SCREAMING_SNAKE_CASE ( ) -> Any:
_lowerCAmelCase : Dict = {}
for _config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in _config_class.__module__:
continue
# Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
_lowerCAmelCase : Any = [
cls
for name, cls in inspect.getmembers(
inspect.getmodule(_config_class ) ,lambda _lowerCamelCase : inspect.isclass(__A )
and issubclass(__A ,__A )
and inspect.getmodule(__A ) == inspect.getmodule(_config_class ) ,)
]
for config_class in config_classes_in_module:
_lowerCAmelCase : int = check_config_attributes_being_used(__A )
if len(__A ) > 0:
_lowerCAmelCase : Any = unused_attributes
if len(__A ) > 0:
_lowerCAmelCase : str = """The following configuration classes contain unused attributes in the corresponding modeling files:\n"""
for name, attributes in configs_with_unused_attributes.items():
error += f"{name}: {attributes}\n"
raise ValueError(__A )
if __name__ == "__main__":
check_config_attributes()
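A toy illustration of the trickiest branch above, the multi-line getattr detection: the whitespace classes in the regex let it match even when the call is split across lines. The attribute name here is arbitrary.

import re

source = 'x = getattr(\n    self.config, "hidden_size", 768)'
pattern = r'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"hidden_size"'
print(bool(re.search(pattern, source)))  # True: the newline sits inside the whitespace class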
| 357 | """simple docstring"""
import argparse
from tax import checkpoints
from transformers import AutoConfig, FlaxAutoModelForSeqaSeqLM
def SCREAMING_SNAKE_CASE ( _lowerCamelCase : List[str] ,_lowerCamelCase : Any ,_lowerCamelCase : Optional[Any] ) -> str:
_lowerCAmelCase : str = AutoConfig.from_pretrained(_lowerCamelCase )
_lowerCAmelCase : int = FlaxAutoModelForSeqaSeqLM.from_config(config=_lowerCamelCase )
_lowerCAmelCase : Any = checkpoints.load_tax_checkpoint(_lowerCamelCase )
_lowerCAmelCase : Tuple = """wi_0""" in tax_model["""target"""]["""encoder"""]["""layers_0"""]["""mlp"""]
if config.model_type == "t5":
_lowerCAmelCase : Tuple = """SelfAttention"""
if config.model_type == "longt5" and config.encoder_attention_type == "local":
_lowerCAmelCase : Optional[Any] = """LocalSelfAttention"""
elif config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Union[str, Any] = """TransientGlobalSelfAttention"""
else:
raise ValueError(
"""Given config is expected to have `model_type='t5'`, or `model_type='longt5` with `encoder_attention_type`"""
""" attribute with a value from ['local', 'transient-global].""" )
# Encoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Tuple = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""value"""]["""kernel"""]
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Dict = tax_model["""target"""]["""encoder"""][layer_name]["""attention"""]["""T5LayerNorm_0"""]["""scale"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""][layer_name]["""pre_attention_layer_norm"""]["""scale"""]
if split_mlp_wi:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Tuple = tax_model["""target"""]["""encoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : Any = flax_model.params["""encoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : Any = tax_attention_key
_lowerCAmelCase : str = tax_attention_out
_lowerCAmelCase : Union[str, Any] = tax_attention_query
_lowerCAmelCase : Optional[Any] = tax_attention_value
_lowerCAmelCase : List[str] = tax_attention_layer_norm
# Global input layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : Any = tax_global_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : List[Any] = tax_mlp_wi_a
else:
_lowerCAmelCase : List[str] = tax_mlp_wi
_lowerCAmelCase : str = tax_mlp_wo
_lowerCAmelCase : Optional[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Any = flax_model_encoder_layer_block
# Only for layer 0:
_lowerCAmelCase : Union[str, Any] = tax_model["""target"""]["""encoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[Any] = tax_encoder_rel_embedding
# Side/global relative position_bias + layer norm
if config.model_type == "longt5" and config.encoder_attention_type == "transient-global":
_lowerCAmelCase : List[str] = tax_model["""target"""]["""encoder"""]["""side_relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Optional[int] = tax_encoder_global_rel_embedding
# Assigning
_lowerCAmelCase : Any = tax_model["""target"""]["""encoder"""]["""encoder_norm"""]["""scale"""]
_lowerCAmelCase : Any = tax_encoder_norm
# Decoder
for layer_index in range(config.num_layers ):
_lowerCAmelCase : Optional[int] = f"layers_{str(_lowerCamelCase )}"
# Self-Attention
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""key"""]["""kernel"""]
_lowerCAmelCase : int = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""out"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""query"""]["""kernel"""]
_lowerCAmelCase : str = tax_model["""target"""]["""decoder"""][layer_name]["""self_attention"""]["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_self_attention_layer_norm"""][
"""scale"""
]
# Encoder-Decoder-Attention
_lowerCAmelCase : List[Any] = tax_model["""target"""]["""decoder"""][layer_name]["""encoder_decoder_attention"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""key"""]["""kernel"""]
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_module["""out"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_enc_dec_attention_module["""query"""]["""kernel"""]
_lowerCAmelCase : Dict = tax_enc_dec_attention_module["""value"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""pre_cross_attention_layer_norm"""]["""scale"""]
# MLP
if split_mlp_wi:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_0"""]["""kernel"""]
_lowerCAmelCase : List[str] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi_1"""]["""kernel"""]
else:
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wi"""]["""kernel"""]
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""mlp"""]["""wo"""]["""kernel"""]
# Layer Normalization
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""][layer_name]["""pre_mlp_layer_norm"""]["""scale"""]
# Assigning
_lowerCAmelCase : str = flax_model.params["""decoder"""]["""block"""][str(_lowerCamelCase )]["""layer"""]
_lowerCAmelCase : int = tax_attention_key
_lowerCAmelCase : List[str] = tax_attention_out
_lowerCAmelCase : Optional[Any] = tax_attention_query
_lowerCAmelCase : Dict = tax_attention_value
_lowerCAmelCase : str = tax_pre_attention_layer_norm
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_key
_lowerCAmelCase : List[Any] = tax_enc_dec_attention_out
_lowerCAmelCase : Tuple = tax_enc_dec_attention_query
_lowerCAmelCase : Any = tax_enc_dec_attention_value
_lowerCAmelCase : Dict = tax_cross_layer_norm
if split_mlp_wi:
_lowerCAmelCase : Dict = tax_mlp_wi_a
_lowerCAmelCase : int = tax_mlp_wi_a
else:
_lowerCAmelCase : Optional[int] = tax_mlp_wi
_lowerCAmelCase : Dict = tax_mlp_wo
        _lowerCAmelCase : List[Any] = tax_mlp_layer_norm
_lowerCAmelCase : Optional[Any] = flax_model_decoder_layer_block
# Decoder Normalization
_lowerCAmelCase : Any = tax_model["""target"""]["""decoder"""]["""decoder_norm"""]["""scale"""]
    _lowerCAmelCase : List[str] = tax_decoder_norm
# Only for layer 0:
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""decoder"""]["""relpos_bias"""]["""rel_embedding"""].T
_lowerCAmelCase : Union[str, Any] = tax_decoder_rel_embedding
# Token Embeddings
_lowerCAmelCase : Optional[int] = tax_model["""target"""]["""token_embedder"""]["""embedding"""]
    _lowerCAmelCase : Optional[int] = tax_token_embeddings
# LM Head (only in v1.1 and LongT5 checkpoints)
if "logits_dense" in tax_model["target"]["decoder"]:
_lowerCAmelCase : Tuple = tax_model["""target"""]["""decoder"""]["""logits_dense"""]["""kernel"""]
flax_model.save_pretrained(_lowerCamelCase )
print("""T5X Model was sucessfully converted!""" )
if __name__ == "__main__":
_a : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--t5x_checkpoint_path', default=None, type=str, required=True, help='Path the T5X checkpoint.'
)
parser.add_argument('--config_name', default=None, type=str, required=True, help='Config name of LongT5/T5 model.')
parser.add_argument(
'--flax_dump_folder_path', default=None, type=str, required=True, help='Path to the output FLAX model.'
)
_a : List[str] = parser.parse_args()
convert_tax_checkpoint_to_flax(args.tax_checkpoint_path, args.config_name, args.flax_dump_folder_path)
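One detail worth isolating from the conversion above: the relative position bias is stored transposed between the two frameworks, hence the `.T` on rel_embedding. A numpy sketch with illustrative shapes, assuming (num_heads, num_buckets) on the T5X side:

import numpy as np

tax_rel_embedding = np.zeros((8, 32))     # (num_heads, num_buckets), illustrative
flax_rel_embedding = tax_rel_embedding.T  # (num_buckets, num_heads), matching the `.T` above
assert flax_rel_embedding.shape == (32, 8)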
| 126 | 0 |
from .integrations import (
is_optuna_available,
is_ray_available,
is_sigopt_available,
is_wandb_available,
run_hp_search_optuna,
run_hp_search_ray,
run_hp_search_sigopt,
run_hp_search_wandb,
)
from .trainer_utils import (
HPSearchBackend,
default_hp_space_optuna,
default_hp_space_ray,
default_hp_space_sigopt,
default_hp_space_wandb,
)
from .utils import logging
lowercase_ = logging.get_logger(__name__)
class __UpperCamelCase :
"""simple docstring"""
lowerCAmelCase_ = 42
lowerCAmelCase_ = None
@staticmethod
def UpperCAmelCase__ ( ):
"""simple docstring"""
raise NotImplementedError
def UpperCAmelCase__ ( self : List[str] , _A : Any , _A : int , _A : str , **_A : Tuple ):
"""simple docstring"""
raise NotImplementedError
def UpperCAmelCase__ ( self : Optional[int] , _A : int ):
"""simple docstring"""
raise NotImplementedError
def UpperCAmelCase__ ( self : int ):
"""simple docstring"""
if not self.is_available():
raise RuntimeError(
F'''You picked the {self.name} backend, but it is not installed. Run {self.pip_install()}.''' )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ):
"""simple docstring"""
return F'''`pip install {cls.pip_package or cls.name}`'''
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''optuna'''
@staticmethod
def UpperCAmelCase__ ( ):
"""simple docstring"""
return is_optuna_available()
def UpperCAmelCase__ ( self : int , _A : Any , _A : int , _A : str , **_A : Optional[Any] ):
"""simple docstring"""
return run_hp_search_optuna(_A , _A , _A , **_A )
def UpperCAmelCase__ ( self : Optional[int] , _A : int ):
"""simple docstring"""
return default_hp_space_optuna(_A )
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''ray'''
lowerCAmelCase_ = '''\'ray[tune]\''''
@staticmethod
def UpperCAmelCase__ ( ):
"""simple docstring"""
return is_ray_available()
def UpperCAmelCase__ ( self : str , _A : Dict , _A : int , _A : str , **_A : str ):
"""simple docstring"""
return run_hp_search_ray(_A , _A , _A , **_A )
def UpperCAmelCase__ ( self : List[Any] , _A : Optional[int] ):
"""simple docstring"""
return default_hp_space_ray(_A )
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''sigopt'''
@staticmethod
def UpperCAmelCase__ ( ):
"""simple docstring"""
return is_sigopt_available()
def UpperCAmelCase__ ( self : List[Any] , _A : Optional[int] , _A : int , _A : str , **_A : str ):
"""simple docstring"""
return run_hp_search_sigopt(_A , _A , _A , **_A )
def UpperCAmelCase__ ( self : str , _A : Tuple ):
"""simple docstring"""
return default_hp_space_sigopt(_A )
class __UpperCamelCase ( lowerCAmelCase__ ):
"""simple docstring"""
lowerCAmelCase_ = '''wandb'''
@staticmethod
def UpperCAmelCase__ ( ):
"""simple docstring"""
return is_wandb_available()
def UpperCAmelCase__ ( self : Union[str, Any] , _A : List[Any] , _A : int , _A : str , **_A : Any ):
"""simple docstring"""
return run_hp_search_wandb(_A , _A , _A , **_A )
def UpperCAmelCase__ ( self : str , _A : List[Any] ):
"""simple docstring"""
return default_hp_space_wandb(_A )
lowercase_ = {
HPSearchBackend(backend.name): backend for backend in [OptunaBackend, RayTuneBackend, SigOptBackend, WandbBackend]
}
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Any = [backend for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() if backend.is_available()]
if len(snake_case ) > 0:
__SCREAMING_SNAKE_CASE : Union[str, Any] = available_backends[0].name
if len(snake_case ) > 1:
logger.info(
F'''{len(snake_case )} hyperparameter search backends available. Using {name} as the default.''' )
return name
raise RuntimeError(
'''No hyperparameter search backend available.\n'''
+ '''\n'''.join(
F''' - To install {backend.name} run {backend.pip_install()}'''
for backend in ALL_HYPERPARAMETER_SEARCH_BACKENDS.values() ) )
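A stripped-down sketch of the "first available backend wins" selection that the function above performs, with a toy availability probe; the class name is illustrative.

class OptunaLikeBackend:
    name = "optuna"

    @staticmethod
    def is_available():
        try:
            import optuna  # noqa: F401
            return True
        except ImportError:
            return False

backends = [OptunaLikeBackend]
available = [b for b in backends if b.is_available()]
print(available[0].name if available else "no backend available")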
| 303 |
import numpy as np
import torch
from imwatermark import WatermarkEncoder
# Copied from https://github.com/Stability-AI/generative-models/blob/613af104c6b85184091d42d374fef420eddb356d/scripts/demo/streamlit_helpers.py#L66
lowercase_ = 0b1011_0011_1110_1100_1001_0000_0111_1011_1011_0001_1001_1110
# bin(x)[2:] gives bits of x as str, use int to convert them to 0/1
lowercase_ = [int(bit) for bit in bin(WATERMARK_MESSAGE)[2:]]
class __UpperCamelCase :
"""simple docstring"""
def __init__( self : Dict ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE : Optional[Any] = WATERMARK_BITS
__SCREAMING_SNAKE_CASE : Optional[int] = WatermarkEncoder()
self.encoder.set_watermark('''bits''' , self.watermark )
def UpperCAmelCase__ ( self : List[Any] , _A : torch.FloatTensor ):
"""simple docstring"""
if images.shape[-1] < 256:
return images
__SCREAMING_SNAKE_CASE : Union[str, Any] = (255 * (images / 2 + 0.5)).cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
__SCREAMING_SNAKE_CASE : Dict = [self.encoder.encode(_A , '''dwtDct''' ) for image in images]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.from_numpy(np.array(_A ) ).permute(0 , 3 , 1 , 2 )
__SCREAMING_SNAKE_CASE : Optional[Any] = torch.clamp(2 * (images / 255 - 0.5) , min=-1.0 , max=1.0 )
return images
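The value-range round trip in the method above is easy to verify in isolation: images in [-1, 1] are mapped to [0, 255] for the watermark encoder, then mapped back and clamped. A numpy check:

import numpy as np

x = np.array([-1.0, 0.0, 1.0])
encoder_range = 255 * (x / 2 + 0.5)                         # -> [0, 127.5, 255]
back = np.clip(2 * (encoder_range / 255 - 0.5), -1.0, 1.0)  # -> [-1, 0, 1]
assert np.allclose(back, x)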
| 303 | 1 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, RobertaTokenizer, RobertaTokenizerFast
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class __magic_name__ (__lowercase , unittest.TestCase ):
lowerCamelCase__ = RobertaTokenizer
lowerCamelCase__ = RobertaTokenizerFast
lowerCamelCase__ = True
lowerCamelCase__ = {'''cls_token''': '''<s>'''}
def __a ( self ) -> Optional[int]:
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase_ = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
lowerCAmelCase_ = dict(zip(_a , range(len(_a ) ) ) )
lowerCAmelCase_ = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
lowerCAmelCase_ = {"unk_token": "<unk>"}
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
lowerCAmelCase_ = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(_a ) )
def __a ( self , **_a ) -> Any:
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_a )
def __a ( self , **_a ) -> List[Any]:
kwargs.update(self.special_tokens_map )
return RobertaTokenizerFast.from_pretrained(self.tmpdirname , **_a )
def __a ( self , _a ) -> Dict:
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = "lower newer"
return input_text, output_text
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
lowerCAmelCase_ = "lower newer"
lowerCAmelCase_ = ["l", "o", "w", "er", "\u0120", "n", "e", "w", "er"]
lowerCAmelCase_ = tokenizer.tokenize(_a ) # , add_prefix_space=True)
self.assertListEqual(_a , _a )
lowerCAmelCase_ = tokens + [tokenizer.unk_token]
lowerCAmelCase_ = [0, 1, 2, 15, 10, 9, 3, 2, 15, 19]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_a ) , _a )
def __a ( self ) -> int:
lowerCAmelCase_ = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=_a ) , [0, 31414, 232, 328, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cécé herlolip 418" , add_special_tokens=_a ) , [0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2] , )
@slow
def __a ( self ) -> int:
lowerCAmelCase_ = self.tokenizer_class.from_pretrained("roberta-base" )
lowerCAmelCase_ = tokenizer.encode("sequence builders" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode("multi-sequence build" , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.encode(
"sequence builders" , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a )
lowerCAmelCase_ = tokenizer.build_inputs_with_special_tokens(_a , _a )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def __a ( self ) -> Optional[Any]:
lowerCAmelCase_ = self.get_tokenizer()
lowerCAmelCase_ = "Encode this sequence."
lowerCAmelCase_ = tokenizer.byte_encoder[" ".encode("utf-8" )[0]]
# Testing encoder arguments
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a , add_prefix_space=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(_a , _a )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
lowerCAmelCase_ = tokenizer.encode(_a , add_special_tokens=_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(_a , _a )
# Testing spaces after special tokens
lowerCAmelCase_ = "<mask>"
tokenizer.add_special_tokens(
{"mask_token": AddedToken(_a , lstrip=_a , rstrip=_a )} ) # mask token has a left space
lowerCAmelCase_ = tokenizer.convert_tokens_to_ids(_a )
lowerCAmelCase_ = "Encode <mask> sequence"
lowerCAmelCase_ = "Encode <mask>sequence"
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = encoded.index(_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(_a , _a )
lowerCAmelCase_ = tokenizer.encode(_a )
lowerCAmelCase_ = encoded.index(_a )
lowerCAmelCase_ = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(_a , _a )
def __a ( self ) -> Any:
pass
def __a ( self ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = self.tokenizer_class.from_pretrained(_a , **_a )
lowerCAmelCase_ = "A, <mask> AllenNLP sentence."
lowerCAmelCase_ = tokenizer_r.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
lowerCAmelCase_ = tokenizer_p.encode_plus(_a , add_special_tokens=_a , return_token_type_ids=_a )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
lowerCAmelCase_ = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
lowerCAmelCase_ = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
                # Rust correctly handles the space before the mask while Python doesn't
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 250, 6, 50264, 3823, 487, 21992, 3645, 4, 2] )
self.assertSequenceEqual(
_a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
_a , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def __a ( self ) -> Any:
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
lowerCAmelCase_ = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , _a )
self.assertEqual(post_processor_state["add_prefix_space"] , _a )
self.assertEqual(post_processor_state["trim_offsets"] , _a )
def __a ( self ) -> Optional[int]:
# Test which aims to verify that the offsets are well adapted to the argument `add_prefix_space` and
# `trim_offsets`
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
lowerCAmelCase_ = "hello" # `hello` is a token in the vocabulary of `pretrained_name`
lowerCAmelCase_ = f"{text_of_1_token} {text_of_1_token}"
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ) + 1, len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(_a ), len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = f" {text}"
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ) + 1, 1 + len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
lowerCAmelCase_ = self.rust_tokenizer_class.from_pretrained(
_a , use_fast=_a , add_prefix_space=_a , trim_offsets=_a )
lowerCAmelCase_ = tokenizer_r(_a , return_offsets_mapping=_a , add_special_tokens=_a )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(_a )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(_a ), 1 + len(_a ) + 1 + len(_a )) , )
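The fixture in setUp above works because a byte-level BPE tokenizer is fully described by two files; a minimal sketch writing them (loading them afterwards with RobertaTokenizer.from_pretrained(tmp) assumes transformers is installed):

import json
import os
import tempfile

vocab = {"l": 0, "o": 1, "w": 2, "er": 3, "<unk>": 4}
merges = ["#version: 0.2", "e r"]  # each non-header line is one learned merge

tmp = tempfile.mkdtemp()
with open(os.path.join(tmp, "vocab.json"), "w", encoding="utf-8") as f:
    json.dump(vocab, f)
with open(os.path.join(tmp, "merges.txt"), "w", encoding="utf-8") as f:
    f.write("\n".join(merges))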
| 22 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def A(__a: Tuple , __a: Union[str, Any] ):
lowerCAmelCase_ = checkpoint
lowerCAmelCase_ = {}
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
lowerCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
lowerCAmelCase_ = vae_state_dict["quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["quant_conv.bias"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
lowerCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"down.{layer_id}" in key] for layer_id in range(__a )
}
# Retrieves the keys for the decoder up blocks only
lowerCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
lowerCAmelCase_ = {
layer_id: [key for key in vae_state_dict if F"up.{layer_id}" in key] for layer_id in range(__a )
}
for i in range(__a ):
lowerCAmelCase_ = [key for key in down_blocks[i] if F"down.{i}" in key and F"down.{i}.downsample" not in key]
if F"encoder.down.{i}.downsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.weight" )
lowerCAmelCase_ = vae_state_dict.pop(
F"encoder.down.{i}.downsample.conv.bias" )
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"down.{i}.block", "new": F"down_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"encoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
for i in range(__a ):
lowerCAmelCase_ = num_up_blocks - 1 - i
lowerCAmelCase_ = [
key for key in up_blocks[block_id] if F"up.{block_id}" in key and F"up.{block_id}.upsample" not in key
]
if F"decoder.up.{block_id}.upsample.conv.weight" in vae_state_dict:
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.weight"
]
lowerCAmelCase_ = vae_state_dict[
F"decoder.up.{block_id}.upsample.conv.bias"
]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"up.{block_id}.block", "new": F"up_blocks.{i}.resnets"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
lowerCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
lowerCAmelCase_ = [key for key in mid_resnets if F"decoder.mid.block_{i}" in key]
lowerCAmelCase_ = renew_vae_resnet_paths(__a )
lowerCAmelCase_ = {"old": F"mid.block_{i}", "new": F"mid_block.resnets.{i - 1}"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
lowerCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
lowerCAmelCase_ = renew_vae_attention_paths(__a )
lowerCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__a , __a , __a , additional_replacements=[meta_path] , config=__a )
conv_attn_to_linear(__a )
return new_checkpoint
def A(__a: str , __a: str , ):
# Only support V1
lowerCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
lowerCAmelCase_ = io.BytesIO(r.content )
lowerCAmelCase_ = OmegaConf.load(__a )
lowerCAmelCase_ = 512
lowerCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
lowerCAmelCase_ = {}
with safe_open(__a , framework="pt" , device="cpu" ) as f:
for key in f.keys():
lowerCAmelCase_ = f.get_tensor(__a )
else:
lowerCAmelCase_ = torch.load(__a , map_location=__a )["state_dict"]
# Convert the VAE model.
lowerCAmelCase_ = create_vae_diffusers_config(__a , image_size=__a )
lowerCAmelCase_ = custom_convert_ldm_vae_checkpoint(__a , __a )
lowerCAmelCase_ = AutoencoderKL(**__a )
vae.load_state_dict(__a )
vae.save_pretrained(__a )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument('''--vae_pt_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
parser.add_argument('''--dump_path''', default=None, type=str, required=True, help='''Path to the VAE.pt to convert.''')
lowerCamelCase__ = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
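The dual checkpoint-loading branch above generalizes to a small helper; a sketch mirroring that code, not a diffusers API:

import torch

def load_vae_state_dict(path):
    if path.endswith("safetensors"):
        from safetensors import safe_open  # optional dependency
        state = {}
        with safe_open(path, framework="pt", device="cpu") as f:
            for key in f.keys():
                state[key] = f.get_tensor(key)
        return state
    return torch.load(path, map_location="cpu")["state_dict"]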
| 22 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from torchvision.transforms import Compose, Normalize, Resize, ToTensor
from transformers import SwinaSRConfig, SwinaSRForImageSuperResolution, SwinaSRImageProcessor
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = SwinaSRConfig()
if "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__magic_name__ : Any = 4
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
__magic_name__ : int = 4
__magic_name__ : List[Any] = 48
__magic_name__ : Any = 'pixelshuffle_aux'
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__magic_name__ : Optional[Any] = [6, 6, 6, 6]
__magic_name__ : Dict = 60
__magic_name__ : int = [6, 6, 6, 6]
__magic_name__ : Any = 'pixelshuffledirect'
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__magic_name__ : List[Any] = 4
__magic_name__ : List[Any] = 'nearest+conv'
elif "Swin2SR_Jpeg_dynamic" in checkpoint_url:
__magic_name__ : List[Any] = 1
__magic_name__ : Union[str, Any] = 1
__magic_name__ : List[Any] = 126
__magic_name__ : List[str] = 7
__magic_name__ : str = 255.0
__magic_name__ : Any = ''
return config
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
if "patch_embed.proj" in name and "layers" not in name:
__magic_name__ : str = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
__magic_name__ : int = name.replace('patch_embed.norm' , 'embeddings.patch_embeddings.layernorm' )
if "layers" in name:
__magic_name__ : Optional[Any] = name.replace('layers' , 'encoder.stages' )
if "residual_group.blocks" in name:
__magic_name__ : List[str] = name.replace('residual_group.blocks' , 'layers' )
if "attn.proj" in name:
__magic_name__ : Optional[Any] = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
__magic_name__ : List[str] = name.replace('attn' , 'attention.self' )
if "norm1" in name:
__magic_name__ : int = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
__magic_name__ : Union[str, Any] = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
__magic_name__ : Union[str, Any] = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__magic_name__ : Dict = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
__magic_name__ : int = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
__magic_name__ : Union[str, Any] = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
__magic_name__ : str = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
__magic_name__ : List[Any] = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if "patch_embed.proj" in name:
__magic_name__ : Optional[int] = name.replace('patch_embed.proj' , 'patch_embed.projection' )
if name == "norm.weight":
__magic_name__ : List[Any] = 'layernorm.weight'
if name == "norm.bias":
__magic_name__ : str = 'layernorm.bias'
if "conv_first" in name:
__magic_name__ : int = name.replace('conv_first' , 'first_convolution' )
if (
"upsample" in name
or "conv_before_upsample" in name
or "conv_bicubic" in name
or "conv_up" in name
or "conv_hr" in name
or "conv_last" in name
or "aux" in name
):
# heads
if "conv_last" in name:
__magic_name__ : Tuple = name.replace('conv_last' , 'final_convolution' )
if config.upsampler in ["pixelshuffle", "pixelshuffle_aux", "nearest+conv"]:
if "conv_before_upsample.0" in name:
__magic_name__ : List[str] = name.replace('conv_before_upsample.0' , 'conv_before_upsample' )
if "upsample.0" in name:
__magic_name__ : Any = name.replace('upsample.0' , 'upsample.convolution_0' )
if "upsample.2" in name:
__magic_name__ : int = name.replace('upsample.2' , 'upsample.convolution_1' )
__magic_name__ : Union[str, Any] = 'upsample.' + name
elif config.upsampler == "pixelshuffledirect":
__magic_name__ : List[str] = name.replace('upsample.0.weight' , 'upsample.conv.weight' )
__magic_name__ : Any = name.replace('upsample.0.bias' , 'upsample.conv.bias' )
else:
pass
else:
__magic_name__ : Optional[Any] = 'swin2sr.' + name
return name
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : int ):
"""simple docstring"""
for key in orig_state_dict.copy().keys():
__magic_name__ : Optional[Any] = orig_state_dict.pop(lowerCAmelCase )
if "qkv" in key:
__magic_name__ : List[Any] = key.split('.' )
__magic_name__ : int = int(key_split[1] )
__magic_name__ : List[str] = int(key_split[4] )
__magic_name__ : List[str] = config.embed_dim
if "weight" in key:
__magic_name__ : int = val[:dim, :]
__magic_name__ : Dict = val[dim : dim * 2, :]
__magic_name__ : Optional[int] = val[-dim:, :]
else:
__magic_name__ : str = val[:dim]
__magic_name__ : str = val[dim : dim * 2]
__magic_name__ : List[str] = val[-dim:]
pass
else:
__magic_name__ : Dict = val
return orig_state_dict
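# Shape note (added): for embed_dim D, each fused "qkv" weight has shape (3*D, D) and each
# fused bias shape (3*D,); the slices above split them row-wise into the query ([:D]),
# key ([D : D * 2]) and value ([-D:]) tensors that back the separate attention projections.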
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str , lowerCAmelCase : str ):
    """Download an original Swin2SR checkpoint, convert it, verify the outputs, and optionally save or push it."""
__magic_name__ : Tuple = get_config(lowerCAmelCase )
__magic_name__ : int = SwinaSRForImageSuperResolution(lowerCAmelCase )
model.eval()
__magic_name__ : str = torch.hub.load_state_dict_from_url(lowerCAmelCase , map_location='cpu' )
__magic_name__ : Optional[int] = convert_state_dict(lowerCAmelCase , lowerCAmelCase )
__magic_name__ , __magic_name__ : List[str] = model.load_state_dict(lowerCAmelCase , strict=lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
raise ValueError('Missing keys when converting: {}'.format(lowerCAmelCase ) )
for key in unexpected_keys:
if not ("relative_position_index" in key or "relative_coords_table" in key or "self_mask" in key):
raise ValueError(f'Unexpected key {key} in state_dict' )
# verify values
__magic_name__ : Optional[int] = 'https://github.com/mv-lab/swin2sr/blob/main/testsets/real-inputs/shanghai.jpg?raw=true'
__magic_name__ : Any = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw ).convert('RGB' )
__magic_name__ : List[str] = SwinaSRImageProcessor()
# pixel_values = processor(image, return_tensors="pt").pixel_values
__magic_name__ : Optional[int] = 126 if 'Jpeg' in checkpoint_url else 256
__magic_name__ : List[Any] = Compose(
[
Resize((image_size, image_size) ),
ToTensor(),
Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
__magic_name__ : int = transforms(lowerCAmelCase ).unsqueeze(0 )
if config.num_channels == 1:
__magic_name__ : Dict = pixel_values[:, 0, :, :].unsqueeze(1 )
__magic_name__ : Dict = model(lowerCAmelCase )
# assert values
if "Swin2SR_ClassicalSR_X2_64" in checkpoint_url:
__magic_name__ : Dict = torch.Size([1, 3, 512, 512] )
__magic_name__ : Optional[int] = torch.tensor(
[[-0.7087, -0.7138, -0.6721], [-0.8340, -0.8095, -0.7298], [-0.9149, -0.8414, -0.7940]] )
elif "Swin2SR_ClassicalSR_X4_64" in checkpoint_url:
__magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] )
__magic_name__ : Optional[Any] = torch.tensor(
[[-0.7775, -0.8105, -0.8933], [-0.7764, -0.8356, -0.9225], [-0.7976, -0.8686, -0.9579]] )
elif "Swin2SR_CompressedSR_X4_48" in checkpoint_url:
# TODO values didn't match exactly here
__magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] )
__magic_name__ : List[str] = torch.tensor(
[[-0.8035, -0.7504, -0.7491], [-0.8538, -0.8124, -0.7782], [-0.8804, -0.8651, -0.8493]] )
elif "Swin2SR_Lightweight_X2_64" in checkpoint_url:
__magic_name__ : List[str] = torch.Size([1, 3, 512, 512] )
__magic_name__ : Tuple = torch.tensor(
[[-0.7669, -0.8662, -0.8767], [-0.8810, -0.9962, -0.9820], [-0.9340, -1.0322, -1.1149]] )
elif "Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR" in checkpoint_url:
__magic_name__ : Optional[int] = torch.Size([1, 3, 1024, 1024] )
__magic_name__ : Optional[Any] = torch.tensor(
[[-0.5238, -0.5557, -0.6321], [-0.6016, -0.5903, -0.6391], [-0.6244, -0.6334, -0.6889]] )
assert (
outputs.reconstruction.shape == expected_shape
), f'Shape of reconstruction should be {expected_shape}, but is {outputs.reconstruction.shape}'
assert torch.allclose(outputs.reconstruction[0, 0, :3, :3] , lowerCAmelCase , atol=1e-3 )
print('Looks ok!' )
__magic_name__ : Any = {
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth': (
'swin2SR-classical-sr-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X4_64.pth': (
'swin2SR-classical-sr-x4-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_CompressedSR_X4_48.pth': (
'swin2SR-compressed-sr-x4-48'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_Lightweight_X2_64.pth': (
'swin2SR-lightweight-x2-64'
),
'https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_RealworldSR_X4_64_BSRGAN_PSNR.pth': (
'swin2SR-realworld-sr-x4-64-bsrgan-psnr'
),
}
__magic_name__ : Optional[int] = url_to_name[checkpoint_url]
if pytorch_dump_folder_path is not None:
print(f'Saving model {model_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
processor.save_pretrained(lowerCAmelCase )
if push_to_hub:
model.push_to_hub(f'caidas/{model_name}' )
processor.push_to_hub(f'caidas/{model_name}' )
if __name__ == "__main__":
lowerCAmelCase :Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/mv-lab/swin2sr/releases/download/v0.0.1/Swin2SR_ClassicalSR_X2_64.pth''',
type=str,
help='''URL of the original Swin2SR checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
parser.add_argument('''--push_to_hub''', action='''store_true''', help='''Whether to push the converted model to the hub.''')
lowerCAmelCase :Optional[int] = parser.parse_args()
convert_swinasr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub) | 331 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
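# Illustrative sketch (added; not part of the original script): task scripts subclass the
# LightningModule above (conventionally named BaseTransformer in the transformers examples)
# and implement the dataloader hook that deliberately raises NotImplementedError. The names
# below are hypothetical and only show the expected shape of such a subclass:
#
# class NERTransformer(BaseTransformer):
#     mode = "token-classification"
#
#     def get_dataloader(self, type_path: str, batch_size: int, shuffle: bool = False):
#         features = torch.load(self._feature_file(type_path))  # cached features per split
#         return DataLoader(TensorDataset(*features), batch_size=batch_size, shuffle=shuffle)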
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
    """Add the generic training and runtime arguments shared by all example scripts to the parser."""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\']. '
            'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
    """Seed everything, set up checkpoint and logging callbacks, build a pl.Trainer from the parsed args, and optionally fit the model."""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fpaa:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
        print('RAG modeling tests with new set functions successfully executed!' )
return trainer | 331 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def UpperCAmelCase ( _lowerCamelCase = 3 ):
    if isinstance(_lowerCamelCase , str ):
        raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(_lowerCamelCase ) != number_of_qubits:
        raise ValueError("number of qubits must be an exact integer." )
    if number_of_qubits > 10:
        raise ValueError("number of qubits too large to simulate (>10)." )
A : Union[str, Any] = QuantumRegister(_lowerCamelCase , "qr" )
A : Dict = ClassicalRegister(_lowerCamelCase , "cr" )
A : str = QuantumCircuit(_lowerCamelCase , _lowerCamelCase )
A : Dict = number_of_qubits
for i in range(_lowerCamelCase ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(_lowerCamelCase ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , _lowerCamelCase , _lowerCamelCase )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(_lowerCamelCase , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(_lowerCamelCase , _lowerCamelCase )
# simulate with 10000 shots
A : Optional[int] = Aer.get_backend("qasm_simulator" )
A : Tuple = execute(_lowerCamelCase , _lowerCamelCase , shots=1_0000 )
return job.result().get_counts(_lowerCamelCase )
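# The controlled-phase rotations by pi/2, pi/4, ... applied above are the twiddle factors of
# the discrete Fourier transform, and the final swaps undo the bit-reversed qubit ordering.
# A small helper (added for illustration; the name is arbitrary): the QFT of the all-zeros
# state is a uniform superposition, so each of the 2**n basis states should receive roughly
# this many of the 10000 shots.
def _expected_uniform_count(number_of_qubits: int = 3, shots: int = 10_000) -> float:
    """Average count per basis state for a uniform QFT output (helper added, not original)."""
    return shots / 2**number_of_qubits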
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
) | 368 |
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format="""%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s""",
datefmt="""%Y-%m-%d %H:%M:%S""",
level=os.environ.get("""LOGLEVEL""", """INFO""").upper(),
stream=sys.stdout,
)
__SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartForConditionalGeneration}
__SCREAMING_SNAKE_CASE = {"""facebook/bart-base""": BartTokenizer}
def UpperCAmelCase ( ):
A : List[Any] = argparse.ArgumentParser(description="Export Bart model + Beam Search to ONNX graph." )
parser.add_argument(
"--validation_file" , type=_lowerCamelCase , default=_lowerCamelCase , help="A csv or a json file containing the validation data." )
parser.add_argument(
"--max_length" , type=_lowerCamelCase , default=5 , help="The maximum total input sequence length after tokenization." , )
parser.add_argument(
"--num_beams" , type=_lowerCamelCase , default=_lowerCamelCase , help=(
"Number of beams to use for evaluation. This argument will be "
"passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."
) , )
parser.add_argument(
"--model_name_or_path" , type=_lowerCamelCase , help="Path to pretrained model or model identifier from huggingface.co/models." , required=_lowerCamelCase , )
parser.add_argument(
"--config_name" , type=_lowerCamelCase , default=_lowerCamelCase , help="Pretrained config name or path if not the same as model_name" , )
parser.add_argument(
"--device" , type=_lowerCamelCase , default="cpu" , help="Device where the model will be run" , )
parser.add_argument("--output_file_path" , type=_lowerCamelCase , default=_lowerCamelCase , help="Where to store the final ONNX file." )
A : Any = parser.parse_args()
return args
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase="cpu" ):
A : int = model_dict[model_name].from_pretrained(_lowerCamelCase ).to(_lowerCamelCase )
A : List[Any] = tokenizer_dict[model_name].from_pretrained(_lowerCamelCase )
if model_name in ["facebook/bart-base"]:
A : Optional[int] = 0
A : Union[str, Any] = None
A : Optional[Any] = 0
return huggingface_model, tokenizer
def UpperCAmelCase ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ):
model.eval()
A : Optional[Any] = None
A : List[Any] = torch.jit.script(BARTBeamSearchGenerator(_lowerCamelCase ) )
with torch.no_grad():
A : int = "My friends are cool but they eat too many carbs."
A : List[Any] = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1024 , return_tensors="pt" ).to(model.device )
A : int = model.generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , num_beams=_lowerCamelCase , max_length=_lowerCamelCase , early_stopping=_lowerCamelCase , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_lowerCamelCase , (
inputs["input_ids"],
inputs["attention_mask"],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _lowerCamelCase , opset_version=14 , input_names=["input_ids", "attention_mask", "num_beams", "max_length", "decoder_start_token_id"] , output_names=["output_ids"] , dynamic_axes={
"input_ids": {0: "batch", 1: "seq"},
"output_ids": {0: "batch", 1: "seq_out"},
} , example_outputs=_lowerCamelCase , )
logger.info("Model exported to {}".format(_lowerCamelCase ) )
A : Optional[Any] = remove_dup_initializers(os.path.abspath(_lowerCamelCase ) )
logger.info("Deduplicated and optimized model written to {}".format(_lowerCamelCase ) )
A : List[Any] = onnxruntime.InferenceSession(_lowerCamelCase )
A : Dict = ort_sess.run(
_lowerCamelCase , {
"input_ids": inputs["input_ids"].cpu().numpy(),
"attention_mask": inputs["attention_mask"].cpu().numpy(),
"num_beams": np.array(_lowerCamelCase ),
"max_length": np.array(_lowerCamelCase ),
"decoder_start_token_id": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("Model outputs from torch and ONNX Runtime are similar." )
logger.info("Success." )
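# Illustrative follow-up (added; not part of the original script): once exported, beam search
# runs without PyTorch by feeding the same five inputs to ONNX Runtime. The session path and
# the numpy arrays below are hypothetical placeholders.
#
# ort_sess = onnxruntime.InferenceSession("BART.onnx")
# output_ids = ort_sess.run(
#     None,
#     {
#         "input_ids": input_ids_np,            # int64, shape (batch, seq)
#         "attention_mask": attention_mask_np,  # same shape as input_ids
#         "num_beams": np.array(4),
#         "max_length": np.array(50),
#         "decoder_start_token_id": np.array(2),
#     },
# )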
def UpperCAmelCase ( ):
A : Union[str, Any] = parse_args()
A : List[Any] = 5
A : str = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
A : Union[str, Any] = torch.device(args.device )
A , A : Optional[int] = load_model_tokenizer(args.model_name_or_path , _lowerCamelCase )
if model.config.decoder_start_token_id is None:
raise ValueError("Make sure that `config.decoder_start_token_id` is correctly defined" )
model.to(_lowerCamelCase )
if args.max_length:
A : Optional[int] = args.max_length
if args.num_beams:
A : List[Any] = args.num_beams
if args.output_file_path:
A : int = args.output_file_path
else:
A : int = "BART.onnx"
logger.info("Exporting model to ONNX" )
export_and_validate_model(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if __name__ == "__main__":
main() | 256 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
UpperCamelCase_ = {
"configuration_distilbert": [
"DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"DistilBertConfig",
"DistilBertOnnxConfig",
],
"tokenization_distilbert": ["DistilBertTokenizer"],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = ["DistilBertTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"DistilBertForMaskedLM",
"DistilBertForMultipleChoice",
"DistilBertForQuestionAnswering",
"DistilBertForSequenceClassification",
"DistilBertForTokenClassification",
"DistilBertModel",
"DistilBertPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFDistilBertForMaskedLM",
"TFDistilBertForMultipleChoice",
"TFDistilBertForQuestionAnswering",
"TFDistilBertForSequenceClassification",
"TFDistilBertForTokenClassification",
"TFDistilBertMainLayer",
"TFDistilBertModel",
"TFDistilBertPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCamelCase_ = [
"FlaxDistilBertForMaskedLM",
"FlaxDistilBertForMultipleChoice",
"FlaxDistilBertForQuestionAnswering",
"FlaxDistilBertForSequenceClassification",
"FlaxDistilBertForTokenClassification",
"FlaxDistilBertModel",
"FlaxDistilBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_distilbert import (
DISTILBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DistilBertConfig,
DistilBertOnnxConfig,
)
from .tokenization_distilbert import DistilBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_distilbert_fast import DistilBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_distilbert import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
DistilBertPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_distilbert import (
TF_DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDistilBertForMaskedLM,
TFDistilBertForMultipleChoice,
TFDistilBertForQuestionAnswering,
TFDistilBertForSequenceClassification,
TFDistilBertForTokenClassification,
TFDistilBertMainLayer,
TFDistilBertModel,
TFDistilBertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_distilbert import (
FlaxDistilBertForMaskedLM,
FlaxDistilBertForMultipleChoice,
FlaxDistilBertForQuestionAnswering,
FlaxDistilBertForSequenceClassification,
FlaxDistilBertForTokenClassification,
FlaxDistilBertModel,
FlaxDistilBertPreTrainedModel,
)
else:
import sys
UpperCamelCase_ = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
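# Note (added): the _LazyModule above defers the heavy torch/TF/Flax imports until a symbol is
# first accessed. A simplified sketch of the same mechanism using PEP 562 module-level
# __getattr__ (illustrative only, not the exact transformers implementation):
#
# def __getattr__(name):
#     import importlib
#     for module_name, symbols in _import_structure.items():
#         if name in symbols:
#             return getattr(importlib.import_module("." + module_name, __name__), name)
#     raise AttributeError(f"module {__name__!r} has no attribute {name!r}")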
| 251 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCamelCase_ = logging.get_logger(__name__)
UpperCamelCase_ = {"openai-gpt": "https://huggingface.co/openai-gpt/resolve/main/config.json"}
class _a ( PretrainedConfig ):
    '''Configuration class for the original OpenAI GPT model (model type "openai-gpt").'''
A : int = '''openai-gpt'''
A : str = {
'''max_position_embeddings''': '''n_positions''',
'''hidden_size''': '''n_embd''',
'''num_attention_heads''': '''n_head''',
'''num_hidden_layers''': '''n_layer''',
}
def __init__( self, A=40_478, A=512, A=768, A=12, A=12, A="gelu", A=0.1, A=0.1, A=0.1, A=1E-5, A=0.02, A="cls_index", A=True, A=None, A=True, A=0.1, **A, ):
'''simple docstring'''
SCREAMING_SNAKE_CASE : int = vocab_size
SCREAMING_SNAKE_CASE : Optional[Any] = n_positions
SCREAMING_SNAKE_CASE : List[str] = n_embd
SCREAMING_SNAKE_CASE : Optional[Any] = n_layer
SCREAMING_SNAKE_CASE : Optional[Any] = n_head
SCREAMING_SNAKE_CASE : str = afn
SCREAMING_SNAKE_CASE : List[str] = resid_pdrop
SCREAMING_SNAKE_CASE : int = embd_pdrop
SCREAMING_SNAKE_CASE : Optional[Any] = attn_pdrop
SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
SCREAMING_SNAKE_CASE : Tuple = initializer_range
SCREAMING_SNAKE_CASE : Optional[int] = summary_type
SCREAMING_SNAKE_CASE : Tuple = summary_use_proj
SCREAMING_SNAKE_CASE : Dict = summary_activation
SCREAMING_SNAKE_CASE : Tuple = summary_first_dropout
SCREAMING_SNAKE_CASE : List[str] = summary_proj_to_labels
super().__init__(**A )
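# Usage note (added): thanks to the attribute map defined above (conventionally
# `attribute_map`), generic config attributes resolve to the GPT-specific fields, so for an
# instance `cfg` of this class, `cfg.hidden_size` reads `cfg.n_embd` and
# `cfg.num_hidden_layers` reads `cfg.n_layer`; code written against the generic names works
# unchanged, e.g. `assert cfg.hidden_size == cfg.n_embd`.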
| 251 | 1 |
from pathlib import Path
import json
import tempfile
from transformers import FSMTTokenizer, FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE__ = """tiny-wmt19-en-ru"""
# Build
# borrowed from a test
SCREAMING_SNAKE_CASE__ = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""w</w>""",
"""r</w>""",
"""t</w>""",
"""lo""",
"""low""",
"""er</w>""",
"""low</w>""",
"""lowest</w>""",
"""newer</w>""",
"""wider</w>""",
"""<unk>""",
]
SCREAMING_SNAKE_CASE__ = dict(zip(vocab, range(len(vocab))))
SCREAMING_SNAKE_CASE__ = ["""l o 123""", """lo w 1456""", """e r</w> 1789""", """"""]
with tempfile.TemporaryDirectory() as tmpdirname:
SCREAMING_SNAKE_CASE__ = Path(tmpdirname)
SCREAMING_SNAKE_CASE__ = build_dir / VOCAB_FILES_NAMES["""src_vocab_file"""]
SCREAMING_SNAKE_CASE__ = build_dir / VOCAB_FILES_NAMES["""tgt_vocab_file"""]
SCREAMING_SNAKE_CASE__ = build_dir / VOCAB_FILES_NAMES["""merges_file"""]
with open(src_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(tgt_vocab_file, """w""") as fp:
fp.write(json.dumps(vocab_tokens))
with open(merges_file, """w""") as fp:
fp.write("""\n""".join(merges))
SCREAMING_SNAKE_CASE__ = FSMTTokenizer(
langs=["""en""", """ru"""],
src_vocab_size=len(vocab),
tgt_vocab_size=len(vocab),
src_vocab_file=src_vocab_file,
tgt_vocab_file=tgt_vocab_file,
merges_file=merges_file,
)
SCREAMING_SNAKE_CASE__ = FSMTConfig(
langs=["""ru""", """en"""],
src_vocab_size=1_0_0_0,
tgt_vocab_size=1_0_0_0,
d_model=4,
encoder_layers=1,
decoder_layers=1,
encoder_ffn_dim=4,
decoder_ffn_dim=4,
encoder_attention_heads=1,
decoder_attention_heads=1,
)
SCREAMING_SNAKE_CASE__ = FSMTForConditionalGeneration(config)
print(f"""num of params {tiny_model.num_parameters()}""")
# Test
SCREAMING_SNAKE_CASE__ = tokenizer(["""Making tiny model"""], return_tensors="""pt""")
SCREAMING_SNAKE_CASE__ = tiny_model(**batch)
print("""test output:""", len(outputs.logits[0]))
# Save
tiny_model.half() # makes it smaller
tiny_model.save_pretrained(mname_tiny)
tokenizer.save_pretrained(mname_tiny)
print(f"""Generated {mname_tiny}""")
# Upload
# transformers-cli upload tiny-wmt19-en-ru
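# Round-trip check (illustrative; not part of the original script): the artifacts written above
# can be reloaded from the `mname_tiny` directory and should reproduce the tiny config.
#
# reloaded = FSMTForConditionalGeneration.from_pretrained(mname_tiny)
# reloaded_tokenizer = FSMTTokenizer.from_pretrained(mname_tiny)
# assert reloaded.config.d_model == 4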
| 370 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_gpta import GPTaTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE__ = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
SCREAMING_SNAKE_CASE__ = {
"""vocab_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/vocab.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/vocab.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/vocab.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/vocab.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/vocab.json""",
},
"""merges_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/merges.txt""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/merges.txt""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/merges.txt""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/merges.txt""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""gpt2""": """https://huggingface.co/gpt2/resolve/main/tokenizer.json""",
"""gpt2-medium""": """https://huggingface.co/gpt2-medium/resolve/main/tokenizer.json""",
"""gpt2-large""": """https://huggingface.co/gpt2-large/resolve/main/tokenizer.json""",
"""gpt2-xl""": """https://huggingface.co/gpt2-xl/resolve/main/tokenizer.json""",
"""distilgpt2""": """https://huggingface.co/distilgpt2/resolve/main/tokenizer.json""",
},
}
SCREAMING_SNAKE_CASE__ = {
"""gpt2""": 1_0_2_4,
"""gpt2-medium""": 1_0_2_4,
"""gpt2-large""": 1_0_2_4,
"""gpt2-xl""": 1_0_2_4,
"""distilgpt2""": 1_0_2_4,
}
class __lowerCamelCase ( PreTrainedTokenizerFast ):
    """Fast (Rust-backed) GPT-2 byte-level BPE tokenizer with optional prefix-space handling."""
lowerCAmelCase__ = VOCAB_FILES_NAMES
lowerCAmelCase__ = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase__ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase__ = ["input_ids", "attention_mask"]
lowerCAmelCase__ = GPTaTokenizer
def __init__( self , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase=None , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase="<|endoftext|>" , UpperCAmelCase=False , **UpperCAmelCase , ) -> Optional[Any]:
'''simple docstring'''
super().__init__(
UpperCAmelCase , UpperCAmelCase , tokenizer_file=UpperCAmelCase , unk_token=UpperCAmelCase , bos_token=UpperCAmelCase , eos_token=UpperCAmelCase , add_prefix_space=UpperCAmelCase , **UpperCAmelCase , )
lowercase_ = kwargs.pop("add_bos_token" , UpperCAmelCase )
lowercase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , UpperCAmelCase ) != add_prefix_space:
lowercase_ = getattr(UpperCAmelCase , pre_tok_state.pop("type" ) )
lowercase_ = add_prefix_space
lowercase_ = pre_tok_class(**UpperCAmelCase )
lowercase_ = add_prefix_space
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._batch_encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , *UpperCAmelCase , **UpperCAmelCase ) -> BatchEncoding:
'''simple docstring'''
lowercase_ = kwargs.get("is_split_into_words" , UpperCAmelCase )
assert self.add_prefix_space or not is_split_into_words, (
F'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
"to use it with pretokenized inputs."
)
return super()._encode_plus(*UpperCAmelCase , **UpperCAmelCase )
def A__ ( self , UpperCAmelCase , UpperCAmelCase = None ) -> Tuple[str]:
'''simple docstring'''
lowercase_ = self._tokenizer.model.save(UpperCAmelCase , name=UpperCAmelCase )
return tuple(UpperCAmelCase )
def A__ ( self , UpperCAmelCase ) -> List[int]:
'''simple docstring'''
lowercase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(UpperCAmelCase , add_special_tokens=UpperCAmelCase ) + [self.eos_token_id] )
if len(UpperCAmelCase ) > self.model_max_length:
lowercase_ = input_ids[-self.model_max_length :]
return input_ids
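# Behaviour note (added): the conversation helper above encodes every turn, appends
# `eos_token_id` after each one, and keeps only the trailing `model_max_length` ids, so when a
# dialogue overflows it is the oldest turns that get dropped. Hedged sketch, using the method's
# conventional name:
#
# from transformers.pipelines.conversational import Conversation
# conv = Conversation("Hello!")
# ids = tokenizer._build_conversation_input_ids(conv)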
| 297 | 0 |
'''simple docstring'''
import os
import tempfile
import unittest
import uuid
from pathlib import Path
from transformers.testing_utils import get_tests_dir, require_soundfile, require_torch, require_vision
from transformers.tools.agent_types import AgentAudio, AgentImage, AgentText
from transformers.utils import is_soundfile_availble, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_soundfile_availble():
import soundfile as sf
if is_vision_available():
from PIL import Image
def a ( __a="" ) -> str:
    '''Return a unique file path (with the given suffix) inside a freshly created temporary directory.'''
UpperCamelCase__ :Dict = tempfile.mkdtemp()
return os.path.join(__a , str(uuid.uuida() ) + suffix )
@require_soundfile
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[Any] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :str = AgentAudio(UpperCamelCase_ )
UpperCamelCase__ :Tuple = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
del agent_type
# Ensure the path remains even after the object deletion
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
# Ensure that the file contains the same value as the original tensor
UpperCamelCase__ , UpperCamelCase__ :Union[str, Any] = sf.read(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , torch.tensor(UpperCamelCase_ ) , atol=1e-4 ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.rand(12 , dtype=torch.floataa ) - 0.5
UpperCamelCase__ :Optional[Any] = get_new_path(suffix='''.wav''' )
sf.write(UpperCamelCase_ , UpperCamelCase_ , 16000 )
UpperCamelCase__ :List[Any] = AgentAudio(UpperCamelCase_ )
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type.to_raw() , atol=1e-4 ) )
self.assertEqual(agent_type.to_string() , UpperCamelCase_ )
@require_vision
@require_torch
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[int] = torch.randint(0 , 256 , (64, 64, 3) )
UpperCamelCase__ :List[str] = AgentImage(UpperCamelCase_ )
UpperCamelCase__ :List[Any] = str(agent_type.to_string() )
# Ensure that the tensor and the agent_type's tensor are the same
self.assertTrue(torch.allclose(UpperCamelCase_ , agent_type._tensor , atol=1e-4 ) )
self.assertIsInstance(agent_type.to_raw() , Image.Image )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Tuple = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :str = Image.open(UpperCamelCase_ )
UpperCamelCase__ :int = AgentImage(UpperCamelCase_ )
self.assertTrue(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :Optional[Any] = Path(get_tests_dir('''fixtures/tests_samples/COCO''' ) ) / '''000000039769.png'''
UpperCamelCase__ :List[Any] = Image.open(UpperCamelCase_ )
UpperCamelCase__ :Optional[Any] = AgentImage(UpperCamelCase_ )
self.assertFalse(path.samefile(agent_type.to_string() ) )
self.assertTrue(image == agent_type.to_raw() )
# Ensure the path remains even after the object deletion
del agent_type
self.assertTrue(os.path.exists(UpperCamelCase_ ) )
class lowercase ( unittest.TestCase ):
"""simple docstring"""
def lowerCAmelCase__ ( self ):
'''simple docstring'''
UpperCamelCase__ :List[str] = '''Hey!'''
UpperCamelCase__ :str = AgentText(UpperCamelCase_ )
self.assertEqual(UpperCamelCase_ , agent_type.to_string() )
self.assertEqual(UpperCamelCase_ , agent_type.to_raw() )
self.assertEqual(UpperCamelCase_ , UpperCamelCase_ ) | 97 |
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def lowerCamelCase_ ( UpperCamelCase__ : List[Any] , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str , UpperCamelCase__ : Any=1024 ) -> Dict:
    """Greedily concatenate consecutive (src, tgt) pairs, emitting a packed example whenever the next pair would exceed max_tokens."""
__lowerCamelCase , __lowerCamelCase = [], []
__lowerCamelCase = list(zip(UpperCamelCase__ , UpperCamelCase__ ) )
__lowerCamelCase , __lowerCamelCase = sorted_examples[0]
def is_too_big(UpperCamelCase__ : List[str] ):
return tok(UpperCamelCase__ , return_tensors='pt' ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
__lowerCamelCase = new_src + ' ' + src
__lowerCamelCase = new_tgt + ' ' + tgt
if is_too_big(UpperCamelCase__ ) or is_too_big(UpperCamelCase__ ): # cant fit, finalize example
finished_src.append(UpperCamelCase__ )
finished_tgt.append(UpperCamelCase__ )
__lowerCamelCase , __lowerCamelCase = src, tgt
else: # can fit, keep adding
__lowerCamelCase , __lowerCamelCase = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCamelCase__ )
finished_tgt.append(UpperCamelCase__ )
return finished_src, finished_tgt
def lowerCamelCase_ ( UpperCamelCase__ : str , UpperCamelCase__ : Path , UpperCamelCase__ : Optional[int] , UpperCamelCase__ : str ) -> Optional[int]:
    """Pack the train split of data_dir into longer examples; copy the val and test splits through unchanged."""
__lowerCamelCase = Path(UpperCamelCase__ )
save_path.mkdir(exist_ok=UpperCamelCase__ )
for split in ["train"]:
__lowerCamelCase , __lowerCamelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
__lowerCamelCase = [x.rstrip() for x in Path(UpperCamelCase__ ).open().readlines()]
__lowerCamelCase = [x.rstrip() for x in Path(UpperCamelCase__ ).open().readlines()]
__lowerCamelCase , __lowerCamelCase = pack_examples(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
print(F"""packed {split} split from {len(UpperCamelCase__ )} examples -> {len(UpperCamelCase__ )}.""" )
Path(save_path / F"""{split}.source""" ).open('w' ).write('\n'.join(UpperCamelCase__ ) )
Path(save_path / F"""{split}.target""" ).open('w' ).write('\n'.join(UpperCamelCase__ ) )
for split in ["val", "test"]:
__lowerCamelCase , __lowerCamelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(UpperCamelCase__ , save_path / F"""{split}.source""" )
shutil.copyfile(UpperCamelCase__ , save_path / F"""{split}.target""" )
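# Example invocation (illustrative; the script and directory names are assumptions):
#
#   python pack_dataset.py --tok_name facebook/bart-large-cnn --max_seq_len 1024 \
#       --data_dir ./cnn_dm --save_path ./cnn_dm_packed
#
# Only the train split is packed; val and test are copied through unchanged, as shown above.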
def lowerCamelCase_ ( ) -> List[str]:
    """CLI entry point: parse arguments, load the tokenizer, and pack the dataset."""
__lowerCamelCase = argparse.ArgumentParser()
parser.add_argument('--tok_name' , type=UpperCamelCase__ , help='like facebook/bart-large-cnn,t5-base, etc.' )
parser.add_argument('--max_seq_len' , type=UpperCamelCase__ , default=128 )
parser.add_argument('--data_dir' , type=UpperCamelCase__ )
parser.add_argument('--save_path' , type=UpperCamelCase__ )
__lowerCamelCase = parser.parse_args()
__lowerCamelCase = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCamelCase__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli()
| 90 | 0 |
import math
import os
import sys
def snake_case_ (__A : str ) -> str:
__lowerCAmelCase : List[Any] = """"""
try:
with open(__A , """rb""" ) as binary_file:
__lowerCAmelCase : List[Any] = binary_file.read()
for dat in data:
__lowerCAmelCase : Dict = f'''{dat:08b}'''
result += curr_byte
return result
except OSError:
print("""File not accessible""" )
sys.exit()
def snake_case_ (__A : dict[str, str] , __A : str , __A : int , __A : str ) -> None:
lexicon.pop(__A )
__lowerCAmelCase : List[str] = last_match_id
if math.loga(__A ).is_integer():
for curr_key in lexicon:
__lowerCAmelCase : Optional[int] = """0""" + lexicon[curr_key]
__lowerCAmelCase : Optional[int] = bin(__A )[2:]
def snake_case_ (__A : str ) -> str:
__lowerCAmelCase : Optional[int] = {"""0""": """0""", """1""": """1"""}
    __lowerCAmelCase , __lowerCAmelCase : int = """""", """"""
__lowerCAmelCase : str = len(__A )
for i in range(len(__A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
__lowerCAmelCase : Optional[int] = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(__A , __A , __A , __A )
index += 1
__lowerCAmelCase : int = """"""
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
__lowerCAmelCase : Optional[Any] = lexicon[curr_string]
result += last_match_id
return result
def snake_case_ (__A : str , __A : str ) -> str:
__lowerCAmelCase : List[str] = os.path.getsize(__A )
__lowerCAmelCase : Optional[int] = bin(__A )[2:]
__lowerCAmelCase : Optional[int] = len(__A )
return "0" * (length_length - 1) + file_length_binary + compressed
def snake_case_ (__A : str , __A : str ) -> None:
__lowerCAmelCase : Tuple = 8
try:
with open(__A , """wb""" ) as opened_file:
__lowerCAmelCase : Any = [
to_write[i : i + byte_length]
for i in range(0 , len(__A ) , __A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append("""10000000""" )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(__A , 2 ).to_bytes(1 , byteorder="""big""" ) )
except OSError:
print("""File not accessible""" )
sys.exit()
def snake_case_ (__A : str , __A : str ) -> None:
__lowerCAmelCase : int = read_file_binary(__A )
__lowerCAmelCase : Any = compress_data(__A )
__lowerCAmelCase : Optional[Any] = add_file_length(__A , __A )
write_file_binary(__A , __A )
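# Example invocation (illustrative; the script and file names are assumptions). The entry point
# above reads the source file as a bit string, LZW-compresses it, prefixes the encoded input
# length, and writes the result out in 8-bit chunks:
#
#   python lzw_compress.py input.txt output.lzw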
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 368 |
__UpperCAmelCase = """
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
"""
__UpperCAmelCase = [{"""type""": """code""", """content""": INSTALL_CONTENT}]
__UpperCAmelCase = {
"""{processor_class}""": """FakeProcessorClass""",
"""{model_class}""": """FakeModelClass""",
"""{object_class}""": """FakeObjectClass""",
}
| 139 | 0 |
'''simple docstring'''
import argparse
import os
from pathlib import Path
import torch
from bark.generation import _load_model as _bark_load_model
from huggingface_hub import hf_hub_download
from transformers import EncodecConfig, EncodecModel, set_seed
from transformers.models.bark.configuration_bark import (
BarkCoarseConfig,
BarkConfig,
BarkFineConfig,
BarkSemanticConfig,
)
from transformers.models.bark.generation_configuration_bark import (
BarkCoarseGenerationConfig,
BarkFineGenerationConfig,
BarkGenerationConfig,
BarkSemanticGenerationConfig,
)
from transformers.models.bark.modeling_bark import BarkCoarseModel, BarkFineModel, BarkModel, BarkSemanticModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCAmelCase = logging.get_logger(__name__)
set_seed(770)
__lowerCAmelCase = {
'''c_attn''': '''att_proj''',
'''c_proj''': '''out_proj''',
'''c_fc''': '''in_proj''',
'''transformer.''': '''''',
'''h.''': '''layers.''',
'''ln_1''': '''layernorm_1''',
'''ln_2''': '''layernorm_2''',
'''ln_f''': '''layernorm_final''',
'''wpe''': '''position_embeds_layer''',
'''wte''': '''input_embeds_layer''',
}
__lowerCAmelCase = {
'''text_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text.pt''',
},
'''coarse_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse.pt''',
},
'''fine_small''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine.pt''',
},
'''text''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''text_2.pt''',
},
'''coarse''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''coarse_2.pt''',
},
'''fine''': {
'''repo_id''': '''suno/bark''',
'''file_name''': '''fine_2.pt''',
},
}
__lowerCAmelCase = os.path.dirname(os.path.abspath(__file__))
__lowerCAmelCase = os.path.join(os.path.expanduser('''~'''), '''.cache''')
__lowerCAmelCase = os.path.join(os.getenv('''XDG_CACHE_HOME''', default_cache_dir), '''suno''', '''bark_v0''')
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False ) -> Any:
_a : Optional[Any] = model_type
if use_small:
key += "_small"
return os.path.join(lowerCAmelCase_ , REMOTE_MODEL_PATHS[key]['file_name'] )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ ) -> str:
os.makedirs(lowerCAmelCase_ , exist_ok=lowerCAmelCase_ )
hf_hub_download(repo_id=lowerCAmelCase_ , filename=lowerCAmelCase_ , local_dir=lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> Optional[int]:
if model_type == "text":
_a : List[Any] = BarkSemanticModel
_a : Any = BarkSemanticConfig
_a : str = BarkSemanticGenerationConfig
elif model_type == "coarse":
_a : List[Any] = BarkCoarseModel
_a : List[Any] = BarkCoarseConfig
_a : Tuple = BarkCoarseGenerationConfig
elif model_type == "fine":
_a : List[str] = BarkFineModel
_a : List[Any] = BarkFineConfig
_a : int = BarkFineGenerationConfig
else:
raise NotImplementedError()
_a : List[str] = f"""{model_type}_small""" if use_small else model_type
_a : Dict = REMOTE_MODEL_PATHS[model_key]
if not os.path.exists(lowerCAmelCase_ ):
logger.info(f"""{model_type} model not found, downloading into `{CACHE_DIR}`.""" )
_download(model_info['repo_id'] , model_info['file_name'] )
_a : Optional[int] = torch.load(lowerCAmelCase_ , map_location=lowerCAmelCase_ )
# this is a hack
_a : List[Any] = checkpoint['model_args']
if "input_vocab_size" not in model_args:
_a : Optional[int] = model_args['vocab_size']
_a : List[str] = model_args['vocab_size']
del model_args["vocab_size"]
# convert Bark model arguments to HF Bark model arguments
_a : Any = model_args.pop('n_head' )
_a : List[str] = model_args.pop('n_embd' )
_a : Optional[Any] = model_args.pop('n_layer' )
_a : List[str] = ConfigClass(**checkpoint['model_args'] )
_a : Union[str, Any] = ModelClass(config=lowerCAmelCase_ )
_a : List[str] = GenerationConfigClass()
_a : Dict = model_generation_config
_a : str = checkpoint['model']
# fixup checkpoint
_a : Dict = '_orig_mod.'
for k, v in list(state_dict.items() ):
if k.startswith(lowerCAmelCase_ ):
# replace part of the key with corresponding layer name in HF implementation
_a : Tuple = k[len(lowerCAmelCase_ ) :]
for old_layer_name in new_layer_name_dict:
_a : List[Any] = new_k.replace(lowerCAmelCase_ , new_layer_name_dict[old_layer_name] )
_a : Any = state_dict.pop(lowerCAmelCase_ )
_a : Union[str, Any] = set(state_dict.keys() ) - set(model.state_dict().keys() )
_a : List[Any] = {k for k in extra_keys if not k.endswith('.attn.bias' )}
_a : Dict = set(model.state_dict().keys() ) - set(state_dict.keys() )
_a : List[Any] = {k for k in missing_keys if not k.endswith('.attn.bias' )}
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""extra keys found: {extra_keys}""" )
if len(lowerCAmelCase_ ) != 0:
raise ValueError(f"""missing keys: {missing_keys}""" )
model.load_state_dict(lowerCAmelCase_ , strict=lowerCAmelCase_ )
_a : List[str] = model.num_parameters(exclude_embeddings=lowerCAmelCase_ )
_a : List[str] = checkpoint['best_val_loss'].item()
logger.info(f"""model loaded: {round(n_params/1E6 , 1 )}M params, {round(lowerCAmelCase_ , 3 )} loss""" )
model.eval()
model.to(lowerCAmelCase_ )
del checkpoint, state_dict
return model
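# Key-remapping example (added, derived from new_layer_name_dict above): a raw Bark key such as
# "_orig_mod.transformer.h.0.attn.c_attn.weight" first loses its "_orig_mod." prefix, then
# "c_attn" -> "att_proj", "transformer." -> "", and "h." -> "layers." leave
# "layers.0.attn.att_proj.weight" as the Hugging Face parameter name.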
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_=False , lowerCAmelCase_="text" ) -> str:
if model_type not in ("text", "coarse", "fine"):
raise NotImplementedError()
_a : Optional[int] = 'cpu' # do conversion on cpu
_a : str = _get_ckpt_path(lowerCAmelCase_ , use_small=lowerCAmelCase_ )
_a : Optional[Any] = _load_model(lowerCAmelCase_ , lowerCAmelCase_ , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
# load bark initial model
_a : Dict = _bark_load_model(lowerCAmelCase_ , 'cpu' , model_type=lowerCAmelCase_ , use_small=lowerCAmelCase_ )
if model_type == "text":
_a : str = bark_model['model']
if model.num_parameters(exclude_embeddings=lowerCAmelCase_ ) != bark_model.get_num_params():
raise ValueError('initial and new models don\'t have the same number of parameters' )
# check if same output as the bark model
_a : Any = 5
_a : int = 10
if model_type in ["text", "coarse"]:
_a : int = torch.randint(256 , (batch_size, sequence_length) , dtype=torch.int )
_a : Optional[Any] = bark_model(lowerCAmelCase_ )[0]
_a : str = model(lowerCAmelCase_ )
# take last logits
_a : List[Any] = output_new_model_total.logits[:, [-1], :]
else:
_a : str = 3
_a : Any = 8
_a : Optional[int] = torch.randint(256 , (batch_size, sequence_length, n_codes_total) , dtype=torch.int )
_a : Optional[int] = model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : Optional[Any] = bark_model(lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[Any] = output_new_model_total.logits
# output difference should come from the difference of self-attention implementation design
if output_new_model.shape != output_old_model.shape:
raise ValueError('initial and new outputs don\'t have the same shape' )
if (output_new_model - output_old_model).abs().max().item() > 1E-3:
raise ValueError('initial and new outputs are not equal' )
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
model.save_pretrained(lowerCAmelCase_ )
def __lowerCamelCase ( lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , ) -> Optional[int]:
_a : Optional[Any] = os.path.join(lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[Any] = BarkSemanticConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : List[Any] = BarkCoarseConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : Tuple = BarkFineConfig.from_pretrained(os.path.join(lowerCAmelCase_ , 'config.json' ) )
_a : Optional[Any] = EncodecConfig.from_pretrained('facebook/encodec_24khz' )
_a : Any = BarkSemanticModel.from_pretrained(lowerCAmelCase_ )
_a : Union[str, Any] = BarkCoarseModel.from_pretrained(lowerCAmelCase_ )
_a : Any = BarkFineModel.from_pretrained(lowerCAmelCase_ )
_a : Tuple = EncodecModel.from_pretrained('facebook/encodec_24khz' )
_a : Tuple = BarkConfig.from_sub_model_configs(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_a : List[Any] = BarkGenerationConfig.from_sub_model_configs(
semantic.generation_config , coarseAcoustic.generation_config , fineAcoustic.generation_config )
_a : List[str] = BarkModel(lowerCAmelCase_ )
_a : Any = semantic
_a : List[str] = coarseAcoustic
_a : Dict = fineAcoustic
_a : Union[str, Any] = codec
_a : Any = bark_generation_config
Path(lowerCAmelCase_ ).mkdir(exist_ok=lowerCAmelCase_ )
bark.save_pretrained(lowerCAmelCase_ , repo_id=lowerCAmelCase_ , push_to_hub=lowerCAmelCase_ )
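# Example invocations (illustrative; the script name and output paths are assumptions):
#
#   python convert_suno_to_hf.py text ./bark-semantic            # convert one sub-model
#   python convert_suno_to_hf.py coarse ./bark-coarse --is_small # convert the small variant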
if __name__ == "__main__":
__lowerCAmelCase = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''model_type''', type=str, help='''text, coarse or fine.''')
parser.add_argument('''pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
    parser.add_argument('''--is_small''', action='''store_true''', help='''Convert the small version of the model instead of the large one.''')
__lowerCAmelCase = parser.parse_args()
load_model(args.pytorch_dump_folder_path, model_type=args.model_type, use_small=args.is_small)
| 89 |
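The conversion routine above validates the ported checkpoint by comparing parameter counts and final logits against the original model. A minimal, self-contained sketch of that validation pattern; `old_model`, `new_model`, and `example_input` are placeholders for any two torch modules that should agree, and the 1e-3 tolerance mirrors the snippet:

import torch


def check_conversion(old_model, new_model, example_input, atol=1e-3):
    # Both models should expose the same number of parameters.
    n_old = sum(p.numel() for p in old_model.parameters())
    n_new = sum(p.numel() for p in new_model.parameters())
    if n_old != n_new:
        raise ValueError(f"parameter counts differ: {n_old} vs {n_new}")
    with torch.no_grad():
        out_old = old_model(example_input)
        out_new = new_model(example_input)
    if out_old.shape != out_new.shape:
        raise ValueError("initial and new outputs don't have the same shape")
    # Small numerical drift is tolerated, e.g. from a different attention kernel.
    if (out_new - out_old).abs().max().item() > atol:
        raise ValueError("initial and new outputs are not equal")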
import os
import unittest
from transformers.models.cpmant.tokenization_cpmant import VOCAB_FILES_NAMES, CpmAntTokenizer
from transformers.testing_utils import require_jieba, tooslow
from ...test_tokenization_common import TokenizerTesterMixin
@require_jieba
class __a ( __UpperCamelCase , unittest.TestCase ):
__lowercase : str = CpmAntTokenizer
__lowercase : List[Any] = False
def SCREAMING_SNAKE_CASE__ ( self ) -> str:
'''simple docstring'''
super().setUp()
lowercase__: Any = [
'<d>',
'</d>',
'<s>',
'</s>',
'</_>',
'<unk>',
'<pad>',
'</n>',
'我',
'是',
'C',
'P',
'M',
'A',
'n',
't',
]
lowercase__: List[Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
@tooslow
def SCREAMING_SNAKE_CASE__ ( self ) -> Tuple:
'''simple docstring'''
lowercase__: Optional[int] = CpmAntTokenizer.from_pretrained('openbmb/cpm-ant-10b' )
lowercase__: Optional[Any] = '今天天气真好!'
lowercase__: str = ['今天', '天气', '真', '好', '!']
lowercase__: Optional[Any] = tokenizer.tokenize(lowerCAmelCase__ )
self.assertListEqual(lowerCAmelCase__ , lowerCAmelCase__ )
lowercase__: List[str] = '今天天气真好!'
lowercase__: List[str] = [tokenizer.bos_token] + tokens
lowercase__: Tuple = [6, 9_802, 14_962, 2_082, 831, 244]
self.assertListEqual(tokenizer.convert_tokens_to_ids(lowerCAmelCase__ ) , lowerCAmelCase__ )
lowercase__: Any = tokenizer.decode(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ , lowerCAmelCase__ )
| 196 | 0 |
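The test above follows the standard tokenize, convert-to-ids, decode round trip. A minimal sketch of the same pattern against a toy whitespace tokenizer; `ToyTokenizer` is illustrative only and not part of transformers:

class ToyTokenizer:
    """Whitespace tokenizer with a fixed vocabulary, for illustration only."""

    def __init__(self, vocab):
        self.vocab = vocab                           # token -> id
        self.ids = {i: t for t, i in vocab.items()}  # id -> token

    def tokenize(self, text):
        return text.split()

    def convert_tokens_to_ids(self, tokens):
        return [self.vocab[t] for t in tokens]

    def decode(self, ids):
        return " ".join(self.ids[i] for i in ids)


tok = ToyTokenizer({"hello": 0, "world": 1})
tokens = tok.tokenize("hello world")
ids = tok.convert_tokens_to_ids(tokens)
assert tok.decode(ids) == "hello world"  # the round trip preserves the text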
__A = 0 # The first color of the flag.
__A = 1 # The second color of the flag.
__A = 2 # The third color of the flag.
__A = (red, white, blue)
def lowerCamelCase_ ( UpperCamelCase__ : list ) -> list:
"""simple docstring"""
if not sequence:
return []
if len(UpperCamelCase__ ) == 1:
return list(UpperCamelCase__ )
__lowerCamelCase = 0
__lowerCamelCase = len(UpperCamelCase__ ) - 1
__lowerCamelCase = 0
while mid <= high:
if sequence[mid] == colors[0]:
__lowerCamelCase , __lowerCamelCase = sequence[mid], sequence[low]
low += 1
mid += 1
elif sequence[mid] == colors[1]:
mid += 1
elif sequence[mid] == colors[2]:
__lowerCamelCase , __lowerCamelCase = sequence[high], sequence[mid]
high -= 1
else:
__lowerCamelCase = F"""The elements inside the sequence must contains only {colors} values"""
raise ValueError(UpperCamelCase__ )
return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
__A = input("Enter numbers separated by commas:\n").strip()
__A = [int(item.strip()) for item in user_input.split(",")]
print(f'''{dutch_national_flag_sort(unsorted)}''')
| 348 |
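The row above is Dijkstra's Dutch national flag partition with renamed identifiers. A readable, runnable sketch of the same single-pass three-way partition:

def dutch_national_flag_sort(sequence, colors=(0, 1, 2)):
    """Sort a list holding only the three `colors` values in one pass: O(n) time, O(1) extra space."""
    seq = list(sequence)
    low, mid, high = 0, 0, len(seq) - 1
    while mid <= high:
        if seq[mid] == colors[0]:        # first color goes to the front
            seq[low], seq[mid] = seq[mid], seq[low]
            low += 1
            mid += 1
        elif seq[mid] == colors[1]:      # middle color stays in place
            mid += 1
        elif seq[mid] == colors[2]:      # last color goes to the back
            seq[mid], seq[high] = seq[high], seq[mid]
            high -= 1
        else:
            raise ValueError(f"sequence may contain only {colors} values")
    return seq


assert dutch_national_flag_sort([2, 0, 1, 2, 0]) == [0, 0, 1, 2, 2]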
import sys
from collections import defaultdict
class __lowerCAmelCase :
"""simple docstring"""
def __init__( self ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = []
def lowercase_ ( self , lowerCamelCase__ ) -> List[str]:
'''simple docstring'''
return self.node_position[vertex]
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = pos
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__lowerCamelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__lowerCamelCase = 2 * start + 1
else:
__lowerCamelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
__lowerCamelCase , __lowerCamelCase = heap[smallest_child], positions[smallest_child]
__lowerCamelCase , __lowerCamelCase = (
heap[start],
positions[start],
)
__lowerCamelCase , __lowerCamelCase = temp, tempa
__lowerCamelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , lowerCamelCase__ )
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ ) -> int:
'''simple docstring'''
__lowerCamelCase = position[index]
while index != 0:
__lowerCamelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__lowerCamelCase = heap[parent]
__lowerCamelCase = position[parent]
self.set_position(position[parent] , lowerCamelCase__ )
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , lowerCamelCase__ )
break
__lowerCamelCase = parent
else:
__lowerCamelCase = val
__lowerCamelCase = temp
self.set_position(lowerCamelCase__ , 0 )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> str:
'''simple docstring'''
__lowerCamelCase = len(lowerCamelCase__ ) // 2 - 1
for i in range(lowerCamelCase__ , -1 , -1 ):
self.top_to_bottom(lowerCamelCase__ , lowerCamelCase__ , len(lowerCamelCase__ ) , lowerCamelCase__ )
def lowercase_ ( self , lowerCamelCase__ , lowerCamelCase__ ) -> Union[str, Any]:
'''simple docstring'''
__lowerCamelCase = positions[0]
__lowerCamelCase = sys.maxsize
self.top_to_bottom(lowerCamelCase__ , 0 , len(lowerCamelCase__ ) , lowerCamelCase__ )
return temp
def lowerCamelCase_ ( UpperCamelCase__ : Union[str, Any] ) -> List[Any]:
"""simple docstring"""
__lowerCamelCase = Heap()
__lowerCamelCase = [0] * len(UpperCamelCase__ )
    __lowerCamelCase = [-1] * len(UpperCamelCase__ ) # nearest tree vertex for each selected vertex
    # Minimum distance from each explored vertex to a neighboring vertex of the
    # partial tree formed so far in the graph
    __lowerCamelCase = [] # Heap of distances of vertices from their nearest tree neighbor
__lowerCamelCase = []
for vertex in range(len(UpperCamelCase__ ) ):
distance_tv.append(sys.maxsize )
positions.append(UpperCamelCase__ )
heap.node_position.append(UpperCamelCase__ )
__lowerCamelCase = []
__lowerCamelCase = 1
__lowerCamelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__lowerCamelCase = 0
__lowerCamelCase = distance
heap.heapify(UpperCamelCase__ , UpperCamelCase__ )
for _ in range(1 , len(UpperCamelCase__ ) ):
__lowerCamelCase = heap.delete_minimum(UpperCamelCase__ , UpperCamelCase__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__lowerCamelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(UpperCamelCase__ )]
):
__lowerCamelCase = distance
heap.bottom_to_top(
UpperCamelCase__ , heap.get_position(UpperCamelCase__ ) , UpperCamelCase__ , UpperCamelCase__ )
__lowerCamelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
    # < --------- Prim's Algorithm --------- >
__A = int(input("Enter number of edges: ").strip())
__A = defaultdict(list)
for _ in range(edges_number):
__A = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 348 | 1 |
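The row above implements Prim's algorithm with a hand-rolled heap that supports decrease-key via position tracking. The standard-library heapq gives the same result with lazy deletion instead of decrease-key; a compact sketch:

import heapq


def prim_mst(adjacency_list, start=0):
    """Return the MST edges of a connected, undirected weighted graph.

    `adjacency_list` maps each vertex to a list of (neighbor, weight) pairs.
    """
    visited = {start}
    # heap entries: (weight, parent, vertex)
    heap = [(w, start, v) for v, w in adjacency_list[start]]
    heapq.heapify(heap)
    edges = []
    while heap and len(visited) < len(adjacency_list):
        weight, parent, vertex = heapq.heappop(heap)
        if vertex in visited:
            continue  # stale entry: a cheaper edge already reached this vertex
        visited.add(vertex)
        edges.append((parent, vertex, weight))
        for neighbor, w in adjacency_list[vertex]:
            if neighbor not in visited:
                heapq.heappush(heap, (w, vertex, neighbor))
    return edges


graph = {0: [(1, 1), (2, 4)], 1: [(0, 1), (2, 2)], 2: [(0, 4), (1, 2)]}
assert prim_mst(graph) == [(0, 1, 1), (1, 2, 2)]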
"""simple docstring"""
from PIL import Image
def A_ ( _lowerCAmelCase : Image, _lowerCAmelCase : int ):
"""simple docstring"""
_a = (2_59 * (level + 2_55)) / (2_55 * (2_59 - level))
def contrast(_lowerCAmelCase : int ) -> int:
return int(1_28 + factor * (c - 1_28) )
return img.point(_lowerCAmelCase )
if __name__ == "__main__":
# Load image
with Image.open('''image_data/lena.jpg''') as img:
# Change contrast to 170
__snake_case = change_contrast(img, 170)
cont_img.save('''image_data/lena_high_contrast.png''', format='''png''') | 320 |
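The contrast formula above maps a level in [-255, 255] to a multiplicative factor applied around the mid-grey value 128. A small self-contained sketch of the per-pixel mapping; the explicit clamp is an addition for clarity, since PIL's point handles range conversion itself:

def contrast_factor(level):
    # level = 0 leaves pixels unchanged; positive levels stretch around 128
    return (259 * (level + 255)) / (255 * (259 - level))


def apply_contrast(pixel, level):
    factor = contrast_factor(level)
    # clamp to the valid 8-bit range after scaling around mid-grey
    return max(0, min(255, int(128 + factor * (pixel - 128))))


assert contrast_factor(0) == 1.0
assert apply_contrast(128, 170) == 128   # mid-grey is a fixed point
assert apply_contrast(200, 170) == 255   # bright pixels saturate at high contrast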
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__snake_case = logging.get_logger(__name__)
__snake_case = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class __lowerCamelCase ( a__ ):
'''simple docstring'''
A_ : List[Any] = 'decision_transformer'
A_ : Union[str, Any] = ['past_key_values']
A_ : str = {
'max_position_embeddings': 'n_positions',
'num_attention_heads': 'n_head',
'num_hidden_layers': 'n_layer',
}
def __init__( self , __UpperCAmelCase=17 , __UpperCAmelCase=4 , __UpperCAmelCase=128 , __UpperCAmelCase=4096 , __UpperCAmelCase=True , __UpperCAmelCase=1 , __UpperCAmelCase=1024 , __UpperCAmelCase=3 , __UpperCAmelCase=1 , __UpperCAmelCase=None , __UpperCAmelCase="relu" , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=0.1 , __UpperCAmelCase=1e-5 , __UpperCAmelCase=0.02 , __UpperCAmelCase=True , __UpperCAmelCase=True , __UpperCAmelCase=50256 , __UpperCAmelCase=50256 , __UpperCAmelCase=False , __UpperCAmelCase=False , **__UpperCAmelCase , ) -> Optional[int]:
_a = state_dim
_a = act_dim
_a = hidden_size
_a = max_ep_len
_a = action_tanh
_a = vocab_size
_a = n_positions
_a = n_layer
_a = n_head
_a = n_inner
_a = activation_function
_a = resid_pdrop
_a = embd_pdrop
_a = attn_pdrop
_a = layer_norm_epsilon
_a = initializer_range
_a = scale_attn_weights
_a = use_cache
_a = scale_attn_by_inverse_layer_idx
_a = reorder_and_upcast_attn
_a = bos_token_id
_a = eos_token_id
super().__init__(bos_token_id=__UpperCAmelCase , eos_token_id=__UpperCAmelCase , **__UpperCAmelCase ) | 320 | 1 |
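Configuration classes like the one above mostly store hyperparameters and expose legacy attribute aliases such as `num_attention_heads` for `n_head`. A minimal sketch of that aliasing pattern, independent of the actual PretrainedConfig implementation:

class Config:
    # legacy name -> canonical attribute actually stored on the instance
    attribute_map = {"num_attention_heads": "n_head", "num_hidden_layers": "n_layer"}

    def __init__(self, n_head=4, n_layer=3):
        self.n_head = n_head
        self.n_layer = n_layer

    def __getattr__(self, name):
        # called only when normal lookup fails, so canonical names take the fast path
        if name in self.attribute_map:
            return getattr(self, self.attribute_map[name])
        raise AttributeError(name)


cfg = Config(n_head=8)
assert cfg.num_attention_heads == 8  # the alias resolves to n_head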
import json
import os
import unittest
from transformers import DebertaTokenizer, DebertaTokenizerFast
from transformers.models.deberta.tokenization_deberta import VOCAB_FILES_NAMES
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = DebertaTokenizer
__magic_name__ = True
__magic_name__ = DebertaTokenizerFast
def a_ ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''\u0120''',
'''\u0120l''',
'''\u0120n''',
'''\u0120lo''',
'''\u0120low''',
'''er''',
'''\u0120lowest''',
'''\u0120newer''',
'''\u0120wider''',
'''[UNK]''',
]
snake_case = dict(zip(__snake_case , range(len(__snake_case ) ) ) )
snake_case = ['''#version: 0.2''', '''\u0120 l''', '''\u0120l o''', '''\u0120lo w''', '''e r''', '''''']
snake_case = {'''unk_token''': '''[UNK]'''}
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''vocab_file'''] )
snake_case = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['''merges_file'''] )
with open(self.vocab_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write(json.dumps(__snake_case ) + '''\n''' )
with open(self.merges_file , '''w''' , encoding='''utf-8''' ) as fp:
fp.write('''\n'''.join(__snake_case ) )
def a_ ( self , **__snake_case ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **__snake_case )
def a_ ( self , __snake_case ):
snake_case = '''lower newer'''
snake_case = '''lower newer'''
return input_text, output_text
def a_ ( self ):
snake_case = self.get_tokenizer()
snake_case = '''lower newer'''
snake_case = ['''l''', '''o''', '''w''', '''er''', '''\u0120''', '''n''', '''e''', '''w''', '''er''']
snake_case = tokenizer.tokenize(__snake_case )
self.assertListEqual(__snake_case , __snake_case )
snake_case = tokens + [tokenizer.unk_token]
snake_case = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__snake_case ) , __snake_case )
def a_ ( self ):
snake_case = self.get_tokenizer()
snake_case = tokenizer('''Hello''' , '''World''' )
snake_case = [0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
self.assertListEqual(tokd['''token_type_ids'''] , __snake_case )
@slow
def a_ ( self ):
snake_case = self.tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
snake_case = tokenizer.encode('''sequence builders''' , add_special_tokens=__snake_case )
snake_case = tokenizer.encode('''multi-sequence build''' , add_special_tokens=__snake_case )
snake_case = tokenizer.encode(
'''sequence builders''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
snake_case = tokenizer.encode(
'''sequence builders''' , '''multi-sequence build''' , add_special_tokens=__snake_case , add_prefix_space=__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case )
snake_case = tokenizer.build_inputs_with_special_tokens(__snake_case , __snake_case )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
@slow
def a_ ( self ):
snake_case = [self.tokenizer_class]
if self.test_rust_tokenizer:
tokenizer_classes.append(self.rust_tokenizer_class )
for tokenizer_class in tokenizer_classes:
snake_case = tokenizer_class.from_pretrained('''microsoft/deberta-base''' )
snake_case = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
snake_case = tokenizer(__snake_case , padding=__snake_case )
snake_case = [tokenizer.decode(__snake_case , skip_special_tokens=__snake_case ) for seq in encoding['''input_ids''']]
# fmt: off
snake_case = {
'''input_ids''': [
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 3_5, 8_3, 2_5_1_9_1, 1_6_3, 1_8_8_5_4, 1_3, 1_2_1_5_6, 1_2, 1_6_1_0_1, 2_5_3_7_6, 1_3_8_0_7, 9, 2_2_2_0_5, 2_7_8_9_3, 1_6_3_5, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 2_1_1_8, 1_1_1_2_6, 5_6_5, 2_4_5_3_6, 8_0, 4_3_7_9_7, 4_8_7_8, 7_3_7_3, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1_3_3, 7_8, 6_5, 1_6, 1_0, 3_7_2_4, 1_5_3_8, 3_3_1_8_3, 1_1_3_0_3, 4_3_7_9_7, 1_9_3_8, 4, 8_7_0, 2_4_1_6_5, 2_9_1_0_5, 5, 7_3_9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 3_6_1_7_3, 8_8, 8_0, 6_5_0, 7_8_2_1, 4_5_9_4_0, 6, 5_2, 2_5_5_9, 5, 1_8_3_6, 9, 5, 7_3_9_7, 1_3_1_7_1, 3_1, 5, 1_8_3_6, 9, 3_2_6_4_4, 3_3_1_8_3, 1_1_3_0_3, 4, 2]
],
'''token_type_ids''': [
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
],
'''attention_mask''': [
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
]
}
# fmt: on
snake_case = [
'''ALBERT: A Lite BERT for Self-supervised Learning of Language Representations''',
'''ALBERT incorporates two parameter reduction techniques''',
'''The first one is a factorized embedding parameterization. By decomposing the large vocabulary'''
''' embedding matrix into two small matrices, we separate the size of the hidden layers from the size of'''
''' vocabulary embedding.''',
]
self.assertDictEqual(encoding.data , __snake_case )
for expected, decoded in zip(__snake_case , __snake_case ):
self.assertEqual(__snake_case , __snake_case )
| 213 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import XLMRobertaTokenizerFast
from diffusers import DDIMScheduler, KandinskyImgaImgPipeline, KandinskyPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.pipelines.kandinsky.text_encoder import MCLIPConfig, MultilingualCLIP
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( snake_case__ , unittest.TestCase ):
"""simple docstring"""
__magic_name__ = KandinskyImgaImgPipeline
__magic_name__ = ['prompt', 'image_embeds', 'negative_image_embeds', 'image']
__magic_name__ = [
'prompt',
'negative_prompt',
'image_embeds',
'negative_image_embeds',
'image',
]
__magic_name__ = [
'generator',
'height',
'width',
'strength',
'guidance_scale',
'negative_prompt',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
__magic_name__ = False
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return 3_2
@property
def a_ ( self ):
return self.time_input_dim
@property
def a_ ( self ):
return self.time_input_dim * 4
@property
def a_ ( self ):
return 1_0_0
@property
def a_ ( self ):
snake_case = XLMRobertaTokenizerFast.from_pretrained('''YiYiXu/tiny-random-mclip-base''' )
return tokenizer
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = MCLIPConfig(
numDims=self.cross_attention_dim , transformerDimensions=self.text_embedder_hidden_size , hidden_size=self.text_embedder_hidden_size , intermediate_size=3_7 , num_attention_heads=4 , num_hidden_layers=5 , vocab_size=1_0_0_5 , )
snake_case = MultilingualCLIP(__snake_case )
snake_case = text_encoder.eval()
return text_encoder
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = {
'''in_channels''': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'''out_channels''': 8,
'''addition_embed_type''': '''text_image''',
'''down_block_types''': ('''ResnetDownsampleBlock2D''', '''SimpleCrossAttnDownBlock2D'''),
'''up_block_types''': ('''SimpleCrossAttnUpBlock2D''', '''ResnetUpsampleBlock2D'''),
'''mid_block_type''': '''UNetMidBlock2DSimpleCrossAttn''',
'''block_out_channels''': (self.block_out_channels_a, self.block_out_channels_a * 2),
'''layers_per_block''': 1,
'''encoder_hid_dim''': self.text_embedder_hidden_size,
'''encoder_hid_dim_type''': '''text_image_proj''',
'''cross_attention_dim''': self.cross_attention_dim,
'''attention_head_dim''': 4,
'''resnet_time_scale_shift''': '''scale_shift''',
'''class_embed_type''': None,
}
snake_case = UNetaDConditionModel(**__snake_case )
return model
@property
def a_ ( self ):
return {
"block_out_channels": [3_2, 6_4],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 1_2,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def a_ ( self ):
torch.manual_seed(0 )
snake_case = VQModel(**self.dummy_movq_kwargs )
return model
def a_ ( self ):
snake_case = self.dummy_text_encoder
snake_case = self.dummy_tokenizer
snake_case = self.dummy_unet
snake_case = self.dummy_movq
snake_case = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_schedule''': '''linear''',
'''beta_start''': 0.0_0085,
'''beta_end''': 0.012,
'''clip_sample''': False,
'''set_alpha_to_one''': False,
'''steps_offset''': 0,
'''prediction_type''': '''epsilon''',
'''thresholding''': False,
}
snake_case = DDIMScheduler(**__snake_case )
snake_case = {
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''unet''': unet,
'''scheduler''': scheduler,
'''movq''': movq,
}
return components
def a_ ( self , __snake_case , __snake_case=0 ):
snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = floats_tensor((1, self.cross_attention_dim) , rng=random.Random(seed + 1 ) ).to(__snake_case )
# create init_image
snake_case = floats_tensor((1, 3, 6_4, 6_4) , rng=random.Random(__snake_case ) ).to(__snake_case )
snake_case = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case = Image.fromarray(np.uinta(__snake_case ) ).convert('''RGB''' ).resize((2_5_6, 2_5_6) )
if str(__snake_case ).startswith('''mps''' ):
snake_case = torch.manual_seed(__snake_case )
else:
snake_case = torch.Generator(device=__snake_case ).manual_seed(__snake_case )
snake_case = {
'''prompt''': '''horse''',
'''image''': init_image,
'''image_embeds''': image_embeds,
'''negative_image_embeds''': negative_image_embeds,
'''generator''': generator,
'''height''': 6_4,
'''width''': 6_4,
'''num_inference_steps''': 1_0,
'''guidance_scale''': 7.0,
'''strength''': 0.2,
'''output_type''': '''np''',
}
return inputs
def a_ ( self ):
snake_case = '''cpu'''
snake_case = self.get_dummy_components()
snake_case = self.pipeline_class(**__snake_case )
snake_case = pipe.to(__snake_case )
pipe.set_progress_bar_config(disable=__snake_case )
snake_case = pipe(**self.get_dummy_inputs(__snake_case ) )
snake_case = output.images
snake_case = pipe(
**self.get_dummy_inputs(__snake_case ) , return_dict=__snake_case , )[0]
snake_case = image[0, -3:, -3:, -1]
snake_case = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case = np.array(
[0.6147_4943, 0.607_3539, 0.4330_8544, 0.592_8269, 0.4749_3595, 0.4675_5973, 0.461_3838, 0.4536_8797, 0.5011_9233] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_slice.flatten()}'''
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), F''' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'''
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
"""simple docstring"""
def a_ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def a_ ( self ):
snake_case = load_numpy(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'''
'''/kandinsky/kandinsky_img2img_frog.npy''' )
snake_case = load_image(
'''https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main''' '''/kandinsky/cat.png''' )
snake_case = '''A red cartoon frog, 4k'''
snake_case = KandinskyPriorPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1-prior''' , torch_dtype=torch.floataa )
pipe_prior.to(__snake_case )
snake_case = KandinskyImgaImgPipeline.from_pretrained(
'''kandinsky-community/kandinsky-2-1''' , torch_dtype=torch.floataa )
snake_case = pipeline.to(__snake_case )
pipeline.set_progress_bar_config(disable=__snake_case )
snake_case = torch.Generator(device='''cpu''' ).manual_seed(0 )
snake_case , snake_case = pipe_prior(
__snake_case , generator=__snake_case , num_inference_steps=5 , negative_prompt='''''' , ).to_tuple()
snake_case = pipeline(
__snake_case , image=__snake_case , image_embeds=__snake_case , negative_image_embeds=__snake_case , generator=__snake_case , num_inference_steps=1_0_0 , height=7_6_8 , width=7_6_8 , strength=0.2 , output_type='''np''' , )
snake_case = output.images[0]
assert image.shape == (7_6_8, 7_6_8, 3)
assert_mean_pixel_difference(__snake_case , __snake_case )
| 213 | 1 |
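Diffusion tests like the one above pin randomness with an explicit torch.Generator and compare a small output slice against stored reference values. The essential pattern in isolation; `toy_pipeline` is a stand-in for a real pipeline:

import numpy as np
import torch


def toy_pipeline(generator):
    # stand-in for a real diffusion pipeline: any op that consumes the generator
    return torch.randn(1, 4, 4, 3, generator=generator).numpy()


gen_a = torch.Generator(device="cpu").manual_seed(0)
gen_b = torch.Generator(device="cpu").manual_seed(0)
image_a = toy_pipeline(gen_a)
image_b = toy_pipeline(gen_b)

# Identical seeds make the run reproducible ...
assert np.abs(image_a - image_b).max() == 0.0
# ... so a small slice can serve as a regression fingerprint.
image_slice = image_a[0, -3:, -3:, -1]
expected_slice = image_slice.copy()  # in a real test this array is hard-coded
assert np.abs(image_slice.flatten() - expected_slice.flatten()).max() < 1e-2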
from __future__ import annotations
import numpy as np
def _UpperCAmelCase ( a__):
'''simple docstring'''
return np.maximum(0 , a__)
if __name__ == "__main__":
print(np.array(relu([-1, 0, 5]))) # --> [0, 0, 5]
| 248 |
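`np.maximum(0, x)` broadcasts the scalar 0 against the whole array, which is all ReLU needs. A slightly fuller sketch that also shows the subgradient used during backpropagation:

import numpy as np


def relu(x):
    return np.maximum(0, x)  # elementwise max with 0


def relu_grad(x):
    # subgradient: 1 where the input is positive, 0 elsewhere (0 chosen at x == 0)
    return (np.asarray(x) > 0).astype(float)


x = np.array([-1.0, 0.0, 5.0])
assert relu(x).tolist() == [0.0, 0.0, 5.0]
assert relu_grad(x).tolist() == [0.0, 0.0, 1.0]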
from dataclasses import dataclass
from typing import Optional, Tuple
import torch
from torch import nn
from transformers import RobertaPreTrainedModel, XLMRobertaConfig, XLMRobertaModel
from transformers.utils import ModelOutput
@dataclass
class A__(a_ ):
"""simple docstring"""
_A : Optional[torch.FloatTensor] = None
_A : torch.FloatTensor = None
_A : Optional[Tuple[torch.FloatTensor]] = None
_A : Optional[Tuple[torch.FloatTensor]] = None
class A__(a_ ):
"""simple docstring"""
def __init__( self , _lowercase=1 , _lowercase=0 , _lowercase=2 , _lowercase=512 , _lowercase="cls" , _lowercase=False , _lowercase=True , **_lowercase , ) -> Union[str, Any]:
super().__init__(pad_token_id=_lowercase , bos_token_id=_lowercase , eos_token_id=_lowercase , **_lowercase )
a_ : str = project_dim
a_ : List[Any] = pooler_fn
a_ : Union[str, Any] = learn_encoder
a_ : List[str] = use_attention_mask
class A__(a_ ):
"""simple docstring"""
_A : Any = [r'''pooler''', r'''logit_scale''']
_A : List[str] = [r'''position_ids''', r'''predictions.decoder.bias''']
_A : List[str] = '''roberta'''
_A : Union[str, Any] = RobertaSeriesConfig
def __init__( self , _lowercase ) -> Optional[Any]:
super().__init__(_lowercase )
a_ : Optional[int] = XLMRobertaModel(_lowercase )
a_ : Any = nn.Linear(config.hidden_size , config.project_dim )
a_ : Union[str, Any] = getattr(_lowercase , """has_pre_transformation""" , _lowercase )
if self.has_pre_transformation:
a_ : int = nn.Linear(config.hidden_size , config.project_dim )
a_ : Union[str, Any] = nn.LayerNorm(config.hidden_size , eps=config.layer_norm_eps )
self.post_init()
def UpperCamelCase__ ( self , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , _lowercase = None , ) -> Any:
a_ : str = return_dict if return_dict is not None else self.config.use_return_dict
a_ : Any = self.base_model(
input_ids=_lowercase , attention_mask=_lowercase , token_type_ids=_lowercase , position_ids=_lowercase , head_mask=_lowercase , inputs_embeds=_lowercase , encoder_hidden_states=_lowercase , encoder_attention_mask=_lowercase , output_attentions=_lowercase , output_hidden_states=True if self.has_pre_transformation else output_hidden_states , return_dict=_lowercase , )
if self.has_pre_transformation:
a_ : str = outputs["""hidden_states"""][-2]
a_ : Tuple = self.pre_LN(_lowercase )
a_ : List[str] = self.transformation_pre(_lowercase )
return TransformationModelOutput(
projection_state=_lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
else:
a_ : Union[str, Any] = self.transformation(outputs.last_hidden_state )
return TransformationModelOutput(
projection_state=_lowercase , last_hidden_state=outputs.last_hidden_state , hidden_states=outputs.hidden_states , attentions=outputs.attentions , )
| 248 | 1 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCAmelCase__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : List[Any]=13 , __SCREAMING_SNAKE_CASE : List[str]=30 , __SCREAMING_SNAKE_CASE : Any=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=3 , __SCREAMING_SNAKE_CASE : str=True , __SCREAMING_SNAKE_CASE : Tuple=True , __SCREAMING_SNAKE_CASE : Union[str, Any]=32 , __SCREAMING_SNAKE_CASE : Union[str, Any]=2 , __SCREAMING_SNAKE_CASE : Optional[Any]=4 , __SCREAMING_SNAKE_CASE : int=37 , __SCREAMING_SNAKE_CASE : Tuple="gelu" , __SCREAMING_SNAKE_CASE : Tuple=0.1 , __SCREAMING_SNAKE_CASE : Any=0.1 , __SCREAMING_SNAKE_CASE : int=10 , __SCREAMING_SNAKE_CASE : Optional[int]=0.02 , __SCREAMING_SNAKE_CASE : str=3 , __SCREAMING_SNAKE_CASE : Tuple=None , ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : List[str] ) -> List[Any]:
"""simple docstring"""
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def UpperCAmelCase__ ( self : Any , __SCREAMING_SNAKE_CASE : List[str] , __SCREAMING_SNAKE_CASE : Union[str, Any] , __SCREAMING_SNAKE_CASE : int ) -> str:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFViTModel(config=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
        # Test with an image of a different size than the one specified in the config.
__SCREAMING_SNAKE_CASE = self.image_size // 2
__SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Tuple , __SCREAMING_SNAKE_CASE : Optional[int] ) -> int:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = TFViTForImageClassification(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
        # Test with an image of a different size than the one specified in the config.
__SCREAMING_SNAKE_CASE = self.image_size // 2
__SCREAMING_SNAKE_CASE = pixel_values[:, :, :image_size, :image_size]
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE , interpolate_pos_encoding=__SCREAMING_SNAKE_CASE , training=__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFViTForImageClassification(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(__SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : int ) -> Union[str, Any]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class lowerCAmelCase__ ( a , a , unittest.TestCase ):
"""simple docstring"""
lowerCAmelCase__ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
lowerCAmelCase__ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
lowerCAmelCase__ = False
lowerCAmelCase__ = False
lowerCAmelCase__ = False
def UpperCAmelCase__ ( self : List[Any] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self , config_class=__SCREAMING_SNAKE_CASE , has_text_modality=__SCREAMING_SNAKE_CASE , hidden_size=37 )
def UpperCAmelCase__ ( self : Any ) -> Optional[int]:
"""simple docstring"""
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : Dict ) -> int:
"""simple docstring"""
pass
@unittest.skip(reason="""ViT does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[Any] ) -> Any:
"""simple docstring"""
pass
def UpperCAmelCase__ ( self : Any ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__SCREAMING_SNAKE_CASE , tf.keras.layers.Layer ) )
def UpperCAmelCase__ ( self : Any ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : Optional[int] ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__SCREAMING_SNAKE_CASE )
def UpperCAmelCase__ ( self : List[Any] ) -> Dict:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__SCREAMING_SNAKE_CASE )
@slow
def UpperCAmelCase__ ( self : str ) -> Tuple:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFViTModel.from_pretrained("""google/vit-base-patch16-224""" )
self.assertIsNotNone(__SCREAMING_SNAKE_CASE )
def _snake_case ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class lowerCAmelCase__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def UpperCAmelCase__ ( self : Tuple ) -> Optional[Any]:
"""simple docstring"""
return ViTImageProcessor.from_pretrained("""google/vit-base-patch16-224""" ) if is_vision_available() else None
@slow
def UpperCAmelCase__ ( self : Optional[int] ) -> List[str]:
"""simple docstring"""
__SCREAMING_SNAKE_CASE = TFViTForImageClassification.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=__SCREAMING_SNAKE_CASE , return_tensors="""tf""" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**__SCREAMING_SNAKE_CASE )
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1_000) )
self.assertEqual(outputs.logits.shape , __SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = tf.constant([-0.2744, 0.8215, -0.0836] )
tf.debugging.assert_near(outputs.logits[0, :3] , __SCREAMING_SNAKE_CASE , atol=1E-4 )
| 360 |
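The sequence length in the tester above comes from the standard ViT patch arithmetic: an image of side H cut into PxP patches yields (H/P)^2 tokens plus one [CLS] token. A two-line check of the numbers the tester defaults to (image_size 30, patch_size 2):

image_size, patch_size = 30, 2
num_patches = (image_size // patch_size) ** 2   # 15 * 15 = 225
seq_length = num_patches + 1                    # +1 for the [CLS] token
assert (num_patches, seq_length) == (225, 226)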
'''simple docstring'''
from __future__ import annotations
from cmath import sqrt
def a__ ( a__ , a__ , a__ ):
"""simple docstring"""
if a == 0:
raise ValueError("""Coefficient 'a' must not be zero.""" )
__SCREAMING_SNAKE_CASE = b * b - 4 * a * c
__SCREAMING_SNAKE_CASE = (-b + sqrt(a__ )) / (2 * a)
__SCREAMING_SNAKE_CASE = (-b - sqrt(a__ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
def a__ ( ):
"""simple docstring"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = quadratic_roots(a=5 , b=6 , c=1 )
print(F'The solutions are: {solutiona} and {solutiona}' )
if __name__ == "__main__":
main()
| 331 | 0 |
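`cmath.sqrt` lets a single code path return real or complex roots. A cleaned-up sketch of the same quadratic solver with a usage check covering both branches:

from cmath import sqrt


def quadratic_roots(a, b, c):
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    d = b * b - 4 * a * c                  # discriminant; may be negative
    root1 = (-b + sqrt(d)) / (2 * a)
    root2 = (-b - sqrt(d)) / (2 * a)
    # return plain floats when the roots are real
    return tuple(r.real if r.imag == 0 else r for r in (root1, root2))


assert quadratic_roots(1, -3, 2) == (2.0, 1.0)   # two real roots
assert quadratic_roots(1, 0, 1) == (1j, -1j)     # complex conjugate pair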
'''simple docstring'''
import logging
import os
from .state import PartialState
class UpperCAmelCase_ ( logging.LoggerAdapter ):
@staticmethod
def __UpperCAmelCase ( UpperCAmelCase__ : List[Any] ) -> Optional[int]:
lowerCAmelCase = PartialState()
return not main_process_only or (main_process_only and state.is_main_process)
def __UpperCAmelCase ( self : List[Any] , UpperCAmelCase__ : str , UpperCAmelCase__ : Any , *UpperCAmelCase__ : List[Any] , **UpperCAmelCase__ : Optional[Any] ) -> List[str]:
if PartialState._shared_state == {}:
raise RuntimeError(
'You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility.' )
lowerCAmelCase = kwargs.pop('main_process_only' , UpperCAmelCase__ )
lowerCAmelCase = kwargs.pop('in_order' , UpperCAmelCase__ )
if self.isEnabledFor(UpperCAmelCase__ ):
if self._should_log(UpperCAmelCase__ ):
lowerCAmelCase , lowerCAmelCase = self.process(UpperCAmelCase__ , UpperCAmelCase__ )
self.logger.log(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
elif in_order:
lowerCAmelCase = PartialState()
for i in range(state.num_processes ):
if i == state.process_index:
lowerCAmelCase , lowerCAmelCase = self.process(UpperCAmelCase__ , UpperCAmelCase__ )
self.logger.log(UpperCAmelCase__ , UpperCAmelCase__ , *UpperCAmelCase__ , **UpperCAmelCase__ )
state.wait_for_everyone()
def a_ ( lowerCamelCase : str , lowerCamelCase : str = None ):
if log_level is None:
lowerCAmelCase = os.environ.get('ACCELERATE_LOG_LEVEL' , lowerCamelCase )
lowerCAmelCase = logging.getLogger(lowerCamelCase )
if log_level is not None:
logger.setLevel(log_level.upper() )
logger.root.setLevel(log_level.upper() )
return MultiProcessAdapter(lowerCamelCase , {} )
| 4 |
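The adapter above suppresses log records on non-main processes so multi-GPU runs do not print everything N times. The core idea works with the standard library alone; a minimal sketch where reading the rank from a RANK environment variable is an assumption about the launcher, not Accelerate's API:

import logging
import os


class MainProcessAdapter(logging.LoggerAdapter):
    """Drop records unless this is the main process (rank 0)."""

    def log(self, level, msg, *args, main_process_only=True, **kwargs):
        rank = int(os.environ.get("RANK", "0"))  # assumption: the launcher sets RANK
        if not main_process_only or rank == 0:
            super().log(level, msg, *args, **kwargs)


logging.basicConfig(level=logging.INFO)
logger = MainProcessAdapter(logging.getLogger(__name__), {})
logger.log(logging.INFO, "printed once, on rank 0 only")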
"""simple docstring"""
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_base import BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCAmelCase = logging.get_logger(__name__)
lowerCAmelCase = {"""tokenizer_file""": """tokenizer.json"""}
lowerCAmelCase = {
"""tokenizer_file""": {
"""bigscience/tokenizer""": """https://huggingface.co/bigscience/tokenizer/blob/main/tokenizer.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/tokenizer.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/tokenizer.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/tokenizer.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/tokenizer.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/tokenizer.json""",
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/blob/main/tokenizer.json""",
},
}
class A_ ( A__ ):
"""simple docstring"""
SCREAMING_SNAKE_CASE_ = VOCAB_FILES_NAMES
SCREAMING_SNAKE_CASE_ = PRETRAINED_VOCAB_FILES_MAP
SCREAMING_SNAKE_CASE_ = ["""input_ids""", """attention_mask"""]
SCREAMING_SNAKE_CASE_ = None
def __init__( self :Dict , lowerCamelCase_ :Union[str, Any]=None , lowerCamelCase_ :Any=None , lowerCamelCase_ :int=None , lowerCamelCase_ :List[str]="<unk>" , lowerCamelCase_ :List[Any]="<s>" , lowerCamelCase_ :str="</s>" , lowerCamelCase_ :Union[str, Any]="<pad>" , lowerCamelCase_ :Union[str, Any]=False , lowerCamelCase_ :Dict=False , **lowerCamelCase_ :List[Any] , ):
"""simple docstring"""
super().__init__(
lowerCamelCase_ , lowerCamelCase_ , tokenizer_file=lowerCamelCase_ , unk_token=lowerCamelCase_ , bos_token=lowerCamelCase_ , eos_token=lowerCamelCase_ , pad_token=lowerCamelCase_ , add_prefix_space=lowerCamelCase_ , clean_up_tokenization_spaces=lowerCamelCase_ , **lowerCamelCase_ , )
lowerCamelCase__ : List[str] =json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , lowerCamelCase_ ) != add_prefix_space:
lowerCamelCase__ : str =getattr(lowerCamelCase_ , pre_tok_state.pop('type' ) )
lowerCamelCase__ : List[Any] =add_prefix_space
lowerCamelCase__ : Optional[Any] =pre_tok_class(**lowerCamelCase_ )
lowerCamelCase__ : Any =add_prefix_space
def UpperCAmelCase__ ( self :Optional[int] , *lowerCamelCase_ :List[str] , **lowerCamelCase_ :Optional[Any] ):
"""simple docstring"""
lowerCamelCase__ : List[Any] =kwargs.get('is_split_into_words' , lowerCamelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._batch_encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :int , *lowerCamelCase_ :Optional[Any] , **lowerCamelCase_ :Any ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =kwargs.get('is_split_into_words' , lowerCamelCase_ )
if not (self.add_prefix_space or not is_split_into_words):
raise Exception(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True to use it with"""
' pretokenized inputs.' )
return super()._encode_plus(*lowerCamelCase_ , **lowerCamelCase_ )
def UpperCAmelCase__ ( self :Dict , lowerCamelCase_ :str , lowerCamelCase_ :Optional[str] = None ):
"""simple docstring"""
lowerCamelCase__ : Optional[Any] =self._tokenizer.model.save(lowerCamelCase_ , name=lowerCamelCase_ )
return tuple(lowerCamelCase_ )
def UpperCAmelCase__ ( self :List[Any] , lowerCamelCase_ :"Conversation" ):
"""simple docstring"""
lowerCamelCase__ : Optional[int] =[]
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCamelCase_ , add_special_tokens=lowerCamelCase_ ) + [self.eos_token_id] )
if len(lowerCamelCase_ ) > self.model_max_length:
lowerCamelCase__ : List[str] =input_ids[-self.model_max_length :]
return input_ids | 126 | 0 |
from math import factorial
class __lowercase :
"""simple docstring"""
def __init__( self , A , A ) -> Tuple:
'''simple docstring'''
lowerCamelCase = real
if isinstance(A , A ):
lowerCamelCase = [1] * rank
else:
lowerCamelCase = rank
def __repr__( self ) -> Union[str, Any]:
'''simple docstring'''
return (
F'{self.real}+'
F'{"+".join(str(A )+"E"+str(n+1 )for n,dual in enumerate(self.duals ) )}'
)
def __A ( self ) -> List[str]:
'''simple docstring'''
lowerCamelCase = self.duals.copy()
while cur[-1] == 0:
cur.pop(-1 )
return Dual(self.real , A )
def __add__( self , A ) -> Optional[Any]:
'''simple docstring'''
if not isinstance(A , A ):
return Dual(self.real + other , self.duals )
lowerCamelCase = self.duals.copy()
lowerCamelCase = other.duals.copy()
if len(A ) > len(A ):
o_dual.extend([1] * (len(A ) - len(A )) )
elif len(A ) < len(A ):
s_dual.extend([1] * (len(A ) - len(A )) )
lowerCamelCase = []
for i in range(len(A ) ):
new_duals.append(s_dual[i] + o_dual[i] )
return Dual(self.real + other.real , A )
UpperCamelCase : Any = __add__
def __sub__( self , A ) -> List[str]:
'''simple docstring'''
return self + other * -1
def __mul__( self , A ) -> int:
'''simple docstring'''
if not isinstance(A , A ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i * other )
return Dual(self.real * other , A )
lowerCamelCase = [0] * (len(self.duals ) + len(other.duals ) + 1)
for i, item in enumerate(self.duals ):
for j, jtem in enumerate(other.duals ):
new_duals[i + j + 1] += item * jtem
for k in range(len(self.duals ) ):
new_duals[k] += self.duals[k] * other.real
for index in range(len(other.duals ) ):
new_duals[index] += other.duals[index] * self.real
return Dual(self.real * other.real , A )
UpperCamelCase : Dict = __mul__
def __truediv__( self , A ) -> str:
'''simple docstring'''
if not isinstance(A , A ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i / other )
return Dual(self.real / other , A )
raise ValueError
def __floordiv__( self , A ) -> Dict:
'''simple docstring'''
if not isinstance(A , A ):
lowerCamelCase = []
for i in self.duals:
new_duals.append(i // other )
return Dual(self.real // other , A )
raise ValueError
def __pow__( self , A ) -> int:
'''simple docstring'''
if n < 0 or isinstance(A , A ):
raise ValueError("""power must be a positive integer""" )
if n == 0:
return 1
if n == 1:
return self
lowerCamelCase = self
for _ in range(n - 1 ):
x *= self
return x
def __lowerCamelCase ( lowerCamelCase__ : List[str] , lowerCamelCase__ : Any , lowerCamelCase__ : Optional[Any] ):
'''simple docstring'''
if not callable(lowerCamelCase__ ):
raise ValueError("""differentiate() requires a function as input for func""" )
if not isinstance(lowerCamelCase__ , (float, int) ):
raise ValueError("""differentiate() requires a float as input for position""" )
if not isinstance(lowerCamelCase__ , lowerCamelCase__ ):
raise ValueError("""differentiate() requires an int as input for order""" )
lowerCamelCase = Dual(lowerCamelCase__ , 1 )
lowerCamelCase = func(lowerCamelCase__ )
if order == 0:
return result.real
return result.duals[order - 1] * factorial(lowerCamelCase__ )
if __name__ == "__main__":
import doctest
doctest.testmod()
def __lowerCamelCase ( lowerCamelCase__ : Optional[int] ):
'''simple docstring'''
return y**2 * y**4
print(differentiate(f, 9, 2))
| 66 |
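The class above implements forward-mode automatic differentiation with truncated dual numbers, though the renamed variables make it hard to follow. A compact, runnable first-order version of the same idea:

class Dual1:
    """First-order dual number a + b*eps with eps**2 == 0."""

    def __init__(self, real, eps=0.0):
        self.real, self.eps = real, eps

    def __add__(self, other):
        other = other if isinstance(other, Dual1) else Dual1(other)
        return Dual1(self.real + other.real, self.eps + other.eps)

    __radd__ = __add__

    def __mul__(self, other):
        other = other if isinstance(other, Dual1) else Dual1(other)
        # (a + b*eps)(c + d*eps) = a*c + (a*d + b*c)*eps, since eps**2 == 0
        return Dual1(self.real * other.real,
                     self.real * other.eps + self.eps * other.real)

    __rmul__ = __mul__

    def __pow__(self, n):
        result = Dual1(1.0)
        for _ in range(n):
            result = result * self
        return result


def derivative(func, position):
    """Exact first derivative of func at position, no finite differences."""
    return func(Dual1(position, 1.0)).eps


assert derivative(lambda x: x ** 3, 2.0) == 12.0  # d/dx x^3 = 3*x^2 at x = 2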
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
UpperCAmelCase : List[str] = logging.get_logger(__name__)
UpperCAmelCase : List[Any] = {
"BAAI/AltCLIP": "https://huggingface.co/BAAI/AltCLIP/resolve/main/config.json",
# See all AltCLIP models at https://huggingface.co/models?filter=altclip
}
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Any = "altclip_text_model"
def __init__( self , A=25_00_02 , A=10_24 , A=24 , A=16 , A=40_96 , A="gelu" , A=0.1 , A=0.1 , A=5_14 , A=1 , A=0.02 , A=0.02 , A=1e-0_5 , A=1 , A=0 , A=2 , A="absolute" , A=True , A=7_68 , **A , ) -> int:
'''simple docstring'''
super().__init__(pad_token_id=A , bos_token_id=A , eos_token_id=A , **A )
lowerCamelCase = vocab_size
lowerCamelCase = hidden_size
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = hidden_act
lowerCamelCase = intermediate_size
lowerCamelCase = hidden_dropout_prob
lowerCamelCase = attention_probs_dropout_prob
lowerCamelCase = max_position_embeddings
lowerCamelCase = type_vocab_size
lowerCamelCase = initializer_range
lowerCamelCase = initializer_factor
lowerCamelCase = layer_norm_eps
lowerCamelCase = position_embedding_type
lowerCamelCase = use_cache
lowerCamelCase = project_dim
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Dict = "altclip_vision_model"
def __init__( self , A=7_68 , A=30_72 , A=5_12 , A=12 , A=12 , A=3 , A=2_24 , A=32 , A="quick_gelu" , A=1e-5 , A=0.0 , A=0.02 , A=1.0 , **A , ) -> Dict:
'''simple docstring'''
super().__init__(**A )
lowerCamelCase = hidden_size
lowerCamelCase = intermediate_size
lowerCamelCase = projection_dim
lowerCamelCase = num_hidden_layers
lowerCamelCase = num_attention_heads
lowerCamelCase = num_channels
lowerCamelCase = patch_size
lowerCamelCase = image_size
lowerCamelCase = initializer_range
lowerCamelCase = initializer_factor
lowerCamelCase = attention_dropout
lowerCamelCase = layer_norm_eps
lowerCamelCase = hidden_act
@classmethod
def __A ( cls , A , **A ) -> "PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(A )
lowerCamelCase , lowerCamelCase = cls.get_config_dict(A , **A )
# get the vision config dict if we are loading from AltCLIPConfig
if config_dict.get("""model_type""" ) == "altclip":
lowerCamelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(A , **A )
class __lowercase ( a_ ):
"""simple docstring"""
UpperCamelCase : Optional[Any] = "altclip"
UpperCamelCase : Optional[Any] = True
def __init__( self , A=None , A=None , A=7_68 , A=2.6592 , **A ) -> Dict:
'''simple docstring'''
lowerCamelCase = kwargs.pop("""text_config_dict""" , A )
lowerCamelCase = kwargs.pop("""vision_config_dict""" , A )
super().__init__(**A )
# Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
        # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be the same in most
# cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
if text_config_dict is not None:
if text_config is None:
lowerCamelCase = {}
# This is the complete result when using `text_config_dict`.
lowerCamelCase = AltCLIPTextConfig(**A ).to_dict()
# Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
for key, value in _text_config_dict.items():
if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
# If specified in `text_config_dict`
if key in text_config_dict:
lowerCamelCase = (
F'`{key}` is found in both `text_config_dict` and `text_config` but with different values. '
F'The value `text_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase = (
F'`text_config_dict` is provided which will be used to initialize `AltCLIPTextConfig`. The '
F'value `text_config["{key}"]` will be overriden.'
)
logger.warning(A )
# Update all values in `text_config` with the ones in `_text_config_dict`.
text_config.update(_text_config_dict )
if vision_config_dict is not None:
if vision_config is None:
lowerCamelCase = {}
# This is the complete result when using `vision_config_dict`.
lowerCamelCase = AltCLIPVisionConfig(**A ).to_dict()
# convert keys to string instead of integer
if "id2label" in _vision_config_dict:
lowerCamelCase = {
str(A ): value for key, value in _vision_config_dict["""id2label"""].items()
}
# Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
for key, value in _vision_config_dict.items():
if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
# If specified in `vision_config_dict`
if key in vision_config_dict:
lowerCamelCase = (
F'`{key}` is found in both `vision_config_dict` and `vision_config` but with different '
F'values. The value `vision_config_dict["{key}"]` will be used instead.'
)
# If inferred from default argument values (just to be super careful)
else:
lowerCamelCase = (
F'`vision_config_dict` is provided which will be used to initialize `AltCLIPVisionConfig`. '
F'The value `vision_config["{key}"]` will be overriden.'
)
logger.warning(A )
# Update all values in `vision_config` with the ones in `_vision_config_dict`.
vision_config.update(_vision_config_dict )
if text_config is None:
lowerCamelCase = {}
logger.info("""`text_config` is `None`. Initializing the `AltCLIPTextConfig` with default values.""" )
if vision_config is None:
lowerCamelCase = {}
logger.info("""`vision_config` is `None`. initializing the `AltCLIPVisionConfig` with default values.""" )
lowerCamelCase = AltCLIPTextConfig(**A )
lowerCamelCase = AltCLIPVisionConfig(**A )
lowerCamelCase = projection_dim
lowerCamelCase = logit_scale_init_value
lowerCamelCase = 1.0
@classmethod
def __A ( cls , A , A , **A ) -> Dict:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **A )
def __A ( self ) -> Union[str, Any]:
'''simple docstring'''
lowerCamelCase = copy.deepcopy(self.__dict__ )
lowerCamelCase = self.text_config.to_dict()
lowerCamelCase = self.vision_config.to_dict()
lowerCamelCase = self.__class__.model_type
return output
| 66 | 1 |
'''simple docstring'''
from __future__ import annotations
from statistics import mean
def UpperCAmelCase_ ( __lowercase : list[int] , __lowercase : list[int] , __lowercase : int ) -> list[int]:
'''simple docstring'''
_UpperCAmelCase = [0] * no_of_processes
_UpperCAmelCase = [0] * no_of_processes
# Initialize remaining_time to waiting_time.
for i in range(__lowercase ):
_UpperCAmelCase = burst_time[i]
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = 0
    # While some processes are not yet completed:
    # any process whose arrival time has passed
    # and which still has remaining execution time is put into ready_process;
    # the shortest process in ready_process, target_process, is executed next.
while completed != no_of_processes:
_UpperCAmelCase = []
_UpperCAmelCase = -1
for i in range(__lowercase ):
if (arrival_time[i] <= total_time) and (remaining_time[i] > 0):
ready_process.append(__lowercase )
if len(__lowercase ) > 0:
_UpperCAmelCase = ready_process[0]
for i in ready_process:
if remaining_time[i] < remaining_time[target_process]:
_UpperCAmelCase = i
total_time += burst_time[target_process]
completed += 1
_UpperCAmelCase = 0
_UpperCAmelCase = (
total_time - arrival_time[target_process] - burst_time[target_process]
)
else:
total_time += 1
return waiting_time
def UpperCAmelCase_ ( __lowercase : list[int] , __lowercase : int , __lowercase : list[int] ) -> list[int]:
'''simple docstring'''
_UpperCAmelCase = [0] * no_of_processes
for i in range(__lowercase ):
_UpperCAmelCase = burst_time[i] + waiting_time[i]
return turn_around_time
if __name__ == "__main__":
print('''[TEST CASE 01]''')
__SCREAMING_SNAKE_CASE :Dict = 4
__SCREAMING_SNAKE_CASE :int = [2, 5, 3, 7]
__SCREAMING_SNAKE_CASE :List[Any] = [0, 0, 0, 0]
__SCREAMING_SNAKE_CASE :Tuple = calculate_waitingtime(arrival_time, burst_time, no_of_processes)
__SCREAMING_SNAKE_CASE :Any = calculate_turnaroundtime(
burst_time, no_of_processes, waiting_time
)
# Printing the Result
print('''PID\tBurst Time\tArrival Time\tWaiting Time\tTurnaround Time''')
for i, process_id in enumerate(list(range(1, 5))):
print(
F"{process_id}\t{burst_time[i]}\t\t\t{arrival_time[i]}\t\t\t\t"
F"{waiting_time[i]}\t\t\t\t{turn_around_time[i]}"
)
print(F"\nAverage waiting time = {mean(waiting_time):.5f}")
print(F"Average turnaround time = {mean(turn_around_time):.5f}")
| 22 |
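The scheduler above selects, among arrived processes, the one with the shortest remaining time; note that because the chosen process then runs its full burst in one step, it behaves as non-preemptive shortest-job-first rather than true SRTF whenever a later arrival could preempt. A compact sketch that reproduces the snippet's first test case:

def sjf_waiting_times(arrival, burst):
    """Non-preemptive shortest-job-first waiting times."""
    n = len(burst)
    remaining = list(burst)
    waiting = [0] * n
    done, clock = 0, 0
    while done < n:
        ready = [i for i in range(n) if arrival[i] <= clock and remaining[i] > 0]
        if not ready:
            clock += 1                      # idle until the next arrival
            continue
        target = min(ready, key=lambda i: remaining[i])
        clock += burst[target]              # run the chosen job to completion
        remaining[target] = 0
        waiting[target] = clock - arrival[target] - burst[target]
        done += 1
    return waiting


# Matches the first test case above: bursts [2, 5, 3, 7], all arriving at t = 0.
assert sjf_waiting_times([0, 0, 0, 0], [2, 5, 3, 7]) == [0, 5, 2, 10]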
'''simple docstring'''
from ..utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_pt_objects import * # noqa F403
else:
from .scheduling_consistency_models import CMStochasticIterativeScheduler
from .scheduling_ddim import DDIMScheduler
from .scheduling_ddim_inverse import DDIMInverseScheduler
from .scheduling_ddim_parallel import DDIMParallelScheduler
from .scheduling_ddpm import DDPMScheduler
from .scheduling_ddpm_parallel import DDPMParallelScheduler
from .scheduling_deis_multistep import DEISMultistepScheduler
from .scheduling_dpmsolver_multistep import DPMSolverMultistepScheduler
from .scheduling_dpmsolver_multistep_inverse import DPMSolverMultistepInverseScheduler
from .scheduling_dpmsolver_singlestep import DPMSolverSinglestepScheduler
from .scheduling_euler_ancestral_discrete import EulerAncestralDiscreteScheduler
from .scheduling_euler_discrete import EulerDiscreteScheduler
from .scheduling_heun_discrete import HeunDiscreteScheduler
from .scheduling_ipndm import IPNDMScheduler
    from .scheduling_k_dpm_2_ancestral_discrete import KDPM2AncestralDiscreteScheduler
    from .scheduling_k_dpm_2_discrete import KDPM2DiscreteScheduler
from .scheduling_karras_ve import KarrasVeScheduler
from .scheduling_pndm import PNDMScheduler
from .scheduling_repaint import RePaintScheduler
from .scheduling_sde_ve import ScoreSdeVeScheduler
from .scheduling_sde_vp import ScoreSdeVpScheduler
from .scheduling_unclip import UnCLIPScheduler
from .scheduling_unipc_multistep import UniPCMultistepScheduler
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin
from .scheduling_vq_diffusion import VQDiffusionScheduler
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_flax_objects import * # noqa F403
else:
from .scheduling_ddim_flax import FlaxDDIMScheduler
from .scheduling_ddpm_flax import FlaxDDPMScheduler
from .scheduling_dpmsolver_multistep_flax import FlaxDPMSolverMultistepScheduler
from .scheduling_karras_ve_flax import FlaxKarrasVeScheduler
from .scheduling_lms_discrete_flax import FlaxLMSDiscreteScheduler
from .scheduling_pndm_flax import FlaxPNDMScheduler
from .scheduling_sde_ve_flax import FlaxScoreSdeVeScheduler
from .scheduling_utils_flax import (
FlaxKarrasDiffusionSchedulers,
FlaxSchedulerMixin,
FlaxSchedulerOutput,
broadcast_to_shape_from_left,
)
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .scheduling_lms_discrete import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ..utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .scheduling_dpmsolver_sde import DPMSolverSDEScheduler
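# Each except-branch above imports dummy placeholder objects, so a scheduler whose optional
# backend (torch, flax, scipy, torchsde) is missing still resolves at import time and only
# raises an informative error when it is actually used.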
| 22 | 1 |
"""simple docstring"""
import numpy as np
import datasets
_a : str= """
Compute the Mahalanobis Distance
Mahalonobis distance is the distance between a point and a distribution.
And not between two distinct points. It is effectively a multivariate equivalent of the Euclidean distance.
It was introduced by Prof. P. C. Mahalanobis in 1936
and has been used in various statistical applications ever since
[source: https://www.machinelearningplus.com/statistics/mahalanobis-distance/]
"""
_a : str= """\
@article{de2000mahalanobis,
title={The mahalanobis distance},
author={De Maesschalck, Roy and Jouan-Rimbaud, Delphine and Massart, D{\'e}sir{\'e} L},
journal={Chemometrics and intelligent laboratory systems},
volume={50},
number={1},
pages={1--18},
year={2000},
publisher={Elsevier}
}
"""
_KWARGS_DESCRIPTION = """
Args:
X: List of datapoints to be compared with the `reference_distribution`.
reference_distribution: List of datapoints from the reference distribution we want to compare to.
Returns:
    mahalanobis: The Mahalanobis distance for each datapoint in `X`.
Examples:
>>> mahalanobis_metric = datasets.load_metric(\"mahalanobis\")
>>> results = mahalanobis_metric.compute(reference_distribution=[[0, 1], [1, 0]], X=[[0, 1]])
>>> print(results)
{'mahalanobis': array([0.5])}
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mahalanobis(datasets.Metric):
    def _info(self) -> datasets.MetricInfo:
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "X": datasets.Sequence(datasets.Value("float", id="sequence"), id="X"),
                }
            ),
        )
    def _compute(self, X, reference_distribution):
        # convert to numpy arrays
        X = np.array(X)
        reference_distribution = np.array(reference_distribution)

        # Assert that arrays are 2D
        if len(X.shape) != 2:
            raise ValueError("Expected `X` to be a 2D vector")
        if len(reference_distribution.shape) != 2:
            raise ValueError("Expected `reference_distribution` to be a 2D vector")
        if reference_distribution.shape[0] < 2:
            raise ValueError(
                "Expected `reference_distribution` to be a 2D vector with more than one element in the first dimension"
            )
# Get mahalanobis distance for each prediction
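        # Mahalanobis distance of each row x of X: D**2 = (x - mu)^T . Sigma^-1 . (x - mu),
        # where mu and Sigma come from the reference distribution.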
        X_minus_mu = X - np.mean(reference_distribution)
        cov = np.cov(reference_distribution.T)
        try:
            inv_covmat = np.linalg.inv(cov)
        except np.linalg.LinAlgError:
            inv_covmat = np.linalg.pinv(cov)
        left_term = np.dot(X_minus_mu, inv_covmat)
        mahal_dist = np.dot(left_term, X_minus_mu.T).diagonal()
return {"mahalanobis": mahal_dist}
| 366 | """simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """Factory used to create a ConvertCommand from parsed CLI arguments."""
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
_a : str= "\ntransformers can only be used from the commandline to convert TensorFlow models in PyTorch, In that case, it requires\nTensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.\n"
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """Register this command as a `convert` subcommand on the root CLI parser."""
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to run convert model from original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")
        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [bert, gpt, gpt2, t5, transfo_xl, xlnet, xlm, lxmert]"
            )
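
# Example invocation (paths are illustrative):
#   transformers-cli convert --model_type bert \
#       --tf_checkpoint ./bert_model.ckpt \
#       --config ./bert_config.json \
#       --pytorch_dump_output ./pytorch_model.bin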
| 95 | 0 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class CLIPProcessor(ProcessorMixin):
    """Wraps a CLIP image processor and a CLIP tokenizer into a single processor."""

    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "CLIPImageProcessor"
    tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images. Both cannot be none.")

        if text is not None:
            encoding = self.tokenizer(text, return_tensors=return_tensors, **kwargs)

        if images is not None:
            image_features = self.image_processor(images, return_tensors=return_tensors, **kwargs)

        if text is not None and images is not None:
            encoding["pixel_values"] = image_features.pixel_values
            return encoding
        elif text is not None:
            return encoding
        else:
            return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
    def batch_decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's batch_decode.
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # Forwarded to the tokenizer's decode.
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        tokenizer_input_names = self.tokenizer.model_input_names
        image_processor_input_names = self.image_processor.model_input_names
        return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
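
# Minimal usage sketch (checkpoint name is illustrative):
#   processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
#   inputs = processor(text=["a photo of a cat"], images=image, return_tensors="pt")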
| 180 | """simple docstring"""
def or_gate(input_1: int, input_2: int) -> int:
    """Return 1 if at least one input is 1, else 0 (logical OR)."""
    return int((input_1, input_2).count(1) != 0)


def test_or_gate() -> None:
    assert or_gate(0, 0) == 0
    assert or_gate(0, 1) == 1
    assert or_gate(1, 0) == 1
    assert or_gate(1, 1) == 1
if __name__ == "__main__":
print(or_gate(0, 1))
print(or_gate(1, 0))
print(or_gate(0, 0))
print(or_gate(1, 1))
| 256 | 0 |
import os
import pytest
from transformers.dynamic_module_utils import get_imports
TOP_LEVEL_IMPORT = '\nimport os\n'
IMPORT_IN_FUNCTION = '\ndef foo():\n import os\n return False\n'
DEEPLY_NESTED_IMPORT = '\ndef foo():\n def bar():\n if True:\n import os\n return False\n return bar()\n'
TOP_LEVEL_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError:\n raise ValueError()\n'
TRY_IMPORT_IN_FUNCTION = '\nimport os\n\ndef foo():\n try:\n import bar\n except ImportError:\n raise ValueError()\n'
MULTIPLE_EXCEPTS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept (ImportError, AttributeError):\n raise ValueError()\n'
EXCEPT_AS_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept ImportError as e:\n raise ValueError()\n'
GENERIC_EXCEPT_IMPORT = '\nimport os\n\ntry:\n import bar\nexcept:\n raise ValueError()\n'
MULTILINE_TRY_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n raise ValueError()\n'
MULTILINE_BOTH_IMPORT = '\nimport os\n\ntry:\n import bar\n import baz\nexcept ImportError:\n x = 1\n raise ValueError()\n'
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
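# Every case above should report only "os": get_imports treats imports made inside a
# try/except block as optional dependencies and leaves them out of the result.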
@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case) -> None:
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"] | 292 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

CONVBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'YituTech/conv-bert-base': 'https://huggingface.co/YituTech/conv-bert-base/resolve/main/config.json',
'YituTech/conv-bert-medium-small': (
'https://huggingface.co/YituTech/conv-bert-medium-small/resolve/main/config.json'
),
'YituTech/conv-bert-small': 'https://huggingface.co/YituTech/conv-bert-small/resolve/main/config.json',
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
}
class ConvBertConfig(PretrainedConfig):
    """Configuration class for ConvBERT models."""

    model_type = "convbert"
    def __init__(
        self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12,
        intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1,
        max_position_embeddings=512, type_vocab_size=2, initializer_range=0.02, layer_norm_eps=1e-12,
        pad_token_id=1, bos_token_id=0, eos_token_id=2, embedding_size=768, head_ratio=2,
        conv_kernel_size=9, num_groups=1, classifier_dropout=None, **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.embedding_size = embedding_size
        self.head_ratio = head_ratio
        self.conv_kernel_size = conv_kernel_size
        self.num_groups = num_groups
        self.classifier_dropout = classifier_dropout
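
# Note: instantiating ConvBertConfig() with only these defaults is intended to resemble the
# YituTech/conv-bert-base architecture listed in the archive map above.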
class ConvBertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
                ("token_type_ids", dynamic_axis),
            ]
        ) | 292 | 1 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
class EncoderDecoderConfig(PretrainedConfig):
    """Configuration for an encoder-decoder model composed of two sub-configurations."""

    model_type = "encoder-decoder"
    is_composition = True

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        assert (
            "encoder" in kwargs and "decoder" in kwargs
        ), "Config has to be initialized with encoder and decoder config"
        encoder_config = kwargs.pop("encoder")
        encoder_model_type = encoder_config.pop("model_type")
        decoder_config = kwargs.pop("decoder")
        decoder_model_type = decoder_config.pop("model_type")

        from ..auto.configuration_auto import AutoConfig

        self.encoder = AutoConfig.for_model(encoder_model_type, **encoder_config)
        self.decoder = AutoConfig.for_model(decoder_model_type, **decoder_config)
        self.is_encoder_decoder = True
    @classmethod
    def from_encoder_decoder_configs(
        cls, encoder_config: PretrainedConfig, decoder_config: PretrainedConfig, **kwargs
    ) -> "EncoderDecoderConfig":
        logger.info("Set `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config")
        decoder_config.is_decoder = True
        decoder_config.add_cross_attention = True

        return cls(encoder=encoder_config.to_dict(), decoder=decoder_config.to_dict(), **kwargs)
    def to_dict(self):
        """Serialize this instance to a Python dictionary."""
        output = copy.deepcopy(self.__dict__)
        output["encoder"] = self.encoder.to_dict()
        output["decoder"] = self.decoder.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
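
# Typical construction (the sub-configs are illustrative):
#   config = EncoderDecoderConfig.from_encoder_decoder_configs(bert_config, gpt2_config)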
| 79 |
'''simple docstring'''
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(self, num_attention_heads: int = 16, attention_head_dim: int = 88, in_channels: Optional[int] = None, num_layers: int = 1, dropout: float = 0.0, norm_num_groups: int = 32, cross_attention_dim: Optional[int] = None, attention_bias: bool = False, sample_size: Optional[int] = None, num_vector_embeds: Optional[int] = None, activation_fn: str = "geglu", num_embeds_ada_norm: Optional[int] = None):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(num_attention_heads=num_attention_heads, attention_head_dim=attention_head_dim, in_channels=in_channels, num_layers=num_layers, dropout=dropout, norm_num_groups=norm_num_groups, cross_attention_dim=cross_attention_dim, attention_bias=attention_bias, sample_size=sample_size, num_vector_embeds=num_vector_embeds, activation_fn=activation_fn, num_embeds_ada_norm=num_embeds_ada_norm)
                for _ in range(2)
            ]
        )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(self, hidden_states, encoder_hidden_states, timestep=None, attention_mask=None, cross_attention_kwargs=None, return_dict: bool = True):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states) | 297 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
from collections.abc import Callable
def line_length(fnc: Callable[[float], float], x_start: float, x_end: float, steps: int = 100) -> float:
    """Approximate the length of the curve y = fnc(x) between x_start and x_end."""
    x1 = x_start
    fx1 = fnc(x_start)
    length = 0.0

    for _ in range(steps):
        # Approximates curve as a sequence of linear lines and sums their length
        x2 = (x_end - x_start) / steps + x1
        fx2 = fnc(x2)
        length += math.hypot(x2 - x1, fx2 - fx1)

        # Increment step
        x1 = x2
        fx1 = fx2

    return length
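
# Each segment contributes sqrt(dx**2 + dy**2), so as `steps` grows the sum converges to the
# arc-length integral of sqrt(1 + f'(x)**2) dx over [x_start, x_end].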
if __name__ == "__main__":
    def f(x):
        return math.sin(10 * x)
print("f(x) = sin(10 * x)")
print("The length of the curve from x = -10 to x = 10 is:")
    i = 10
while i <= 100000:
print(f'''With {i} steps: {line_length(f, -10, 10, i)}''')
i *= 10
| 363 |
'''simple docstring'''
import argparse
import json
from tqdm import tqdm
def main():
    parser = argparse.ArgumentParser()

    # Required parameters
    parser.add_argument(
        '--src_path', type=str, default='biencoder-nq-dev.json', help='Path to raw DPR training data',
    )
    parser.add_argument(
        '--evaluation_set', type=str, help='where to store parsed evaluation_set file',
    )
    parser.add_argument(
        '--gold_data_path', type=str, help='where to store parsed gold_data_path file',
    )
    args = parser.parse_args()

    with open(args.src_path, 'r') as src_file, open(args.evaluation_set, 'w') as eval_file, open(
        args.gold_data_path, 'w'
    ) as gold_file:
        dpr_records = json.load(src_file)
        for dpr_record in tqdm(dpr_records):
            question = dpr_record['question']
            contexts = [context['title'] for context in dpr_record['positive_ctxs']]
            eval_file.write(question + '\n')
            gold_file.write('\t'.join(contexts) + '\n')
if __name__ == "__main__":
main()
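
# Example invocation (the script and output file names are illustrative):
#   python parse_dpr_relevance_data.py --src_path biencoder-nq-dev.json \
#       --evaluation_set eval.questions --gold_data_path eval.gold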
| 280 | 0 |
INSTALL_CONTENT = '\n# Transformers installation\n! pip install transformers datasets\n# To install from source instead of the last release, comment the command above and uncomment the following one.\n# ! pip install git+https://github.com/huggingface/transformers.git\n'
notebook_first_cells = [{'type': 'code', 'content': INSTALL_CONTENT}]
black_avoid_patterns = {
'{processor_class}': 'FakeProcessorClass',
'{model_class}': 'FakeModelClass',
'{object_class}': 'FakeObjectClass',
}
| 117 |
'''simple docstring'''
def solution(n: int = 100) -> int:
    """Count distinct terms a**b for 2 <= a <= n and 2 <= b <= n (Project Euler 29)."""
    collect_powers = set()

    n = n + 1  # maximum limit

    for a in range(2, n):
        for b in range(2, n):
            current_pow = a**b  # calculates the current power
            collect_powers.add(current_pow)  # adds the result to the set
    return len(collect_powers)
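
# For example, solution(5) == 15: bases 2..5 with exponents 2..5 give 16 products,
# one of which is a duplicate (2**4 == 4**2 == 16).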
if __name__ == "__main__":
print("Number of terms ", solution(int(str(input()).strip())))
| 139 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"configuration_data2vec_audio": ["DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP", "Data2VecAudioConfig"],
"configuration_data2vec_text": [
"DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecTextConfig",
"Data2VecTextOnnxConfig",
],
"configuration_data2vec_vision": [
"DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP",
"Data2VecVisionConfig",
"Data2VecVisionOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_data2vec_audio"] = [
"DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecAudioForAudioFrameClassification",
"Data2VecAudioForCTC",
"Data2VecAudioForSequenceClassification",
"Data2VecAudioForXVector",
"Data2VecAudioModel",
"Data2VecAudioPreTrainedModel",
]
    _import_structure["modeling_data2vec_text"] = [
"DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecTextForCausalLM",
"Data2VecTextForMaskedLM",
"Data2VecTextForMultipleChoice",
"Data2VecTextForQuestionAnswering",
"Data2VecTextForSequenceClassification",
"Data2VecTextForTokenClassification",
"Data2VecTextModel",
"Data2VecTextPreTrainedModel",
]
    _import_structure["modeling_data2vec_vision"] = [
"DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST",
"Data2VecVisionForImageClassification",
"Data2VecVisionForMaskedImageModeling",
"Data2VecVisionForSemanticSegmentation",
"Data2VecVisionModel",
"Data2VecVisionPreTrainedModel",
]
if is_tf_available():
    _import_structure["modeling_tf_data2vec_vision"] = [
"TFData2VecVisionForImageClassification",
"TFData2VecVisionForSemanticSegmentation",
"TFData2VecVisionModel",
"TFData2VecVisionPreTrainedModel",
]
if TYPE_CHECKING:
    from .configuration_data2vec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, Data2VecAudioConfig
    from .configuration_data2vec_text import (
        DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecTextConfig,
        Data2VecTextOnnxConfig,
    )
    from .configuration_data2vec_vision import (
        DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
        Data2VecVisionConfig,
        Data2VecVisionOnnxConfig,
    )
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
        from .modeling_data2vec_audio import (
            DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecAudioForAudioFrameClassification,
            Data2VecAudioForCTC,
            Data2VecAudioForSequenceClassification,
            Data2VecAudioForXVector,
            Data2VecAudioModel,
            Data2VecAudioPreTrainedModel,
        )
        from .modeling_data2vec_text import (
            DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecTextForCausalLM,
            Data2VecTextForMaskedLM,
            Data2VecTextForMultipleChoice,
            Data2VecTextForQuestionAnswering,
            Data2VecTextForSequenceClassification,
            Data2VecTextForTokenClassification,
            Data2VecTextModel,
            Data2VecTextPreTrainedModel,
        )
        from .modeling_data2vec_vision import (
            DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
            Data2VecVisionForImageClassification,
            Data2VecVisionForMaskedImageModeling,
            Data2VecVisionForSemanticSegmentation,
            Data2VecVisionModel,
            Data2VecVisionPreTrainedModel,
        )

    if is_tf_available():
        from .modeling_tf_data2vec_vision import (
            TFData2VecVisionForImageClassification,
            TFData2VecVisionForSemanticSegmentation,
            TFData2VecVisionModel,
            TFData2VecVisionPreTrainedModel,
        )
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 371 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
_import_structure = {"configuration_beit": ["BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BeitConfig", "BeitOnnxConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_beit"] = ["BeitFeatureExtractor"]
    _import_structure["image_processing_beit"] = ["BeitImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_beit"] = [
"BEIT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BeitForImageClassification",
"BeitForMaskedImageModeling",
"BeitForSemanticSegmentation",
"BeitModel",
"BeitPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_beit"] = [
"FlaxBeitForImageClassification",
"FlaxBeitForMaskedImageModeling",
"FlaxBeitModel",
"FlaxBeitPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 41 | 0 |
red = 0  # The first color of the flag.
white = 1  # The second color of the flag.
blue = 2  # The third color of the flag.
colors = (red, white, blue)
def dutch_national_flag_sort(sequence: list) -> list:
    """Sort a sequence of 0s, 1s and 2s in place using Dijkstra's three-way partitioning."""
    if not sequence:
        return []
    if len(sequence) == 1:
        return list(sequence)

    low = 0
    high = len(sequence) - 1
    mid = 0
    while mid <= high:
        if sequence[mid] == colors[0]:
            sequence[low], sequence[mid] = sequence[mid], sequence[low]
            low += 1
            mid += 1
        elif sequence[mid] == colors[1]:
            mid += 1
        elif sequence[mid] == colors[2]:
            sequence[mid], sequence[high] = sequence[high], sequence[mid]
            high -= 1
        else:
            msg = f"The elements inside the sequence must contain only {colors} values"
            raise ValueError(msg)

    return sequence
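
# Invariant: sequence[:low] holds only red, sequence[low:mid] only white and
# sequence[high + 1:] only blue, so the sort runs in O(n) time with O(1) extra space.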
if __name__ == "__main__":
import doctest
doctest.testmod()
    user_input = input('Enter numbers separated by commas:\n').strip()
    unsorted = [int(item.strip()) for item in user_input.split(',')]
print(f'{dutch_national_flag_sort(unsorted)}')
| 348 | from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_opt''': ['''OPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''OPTConfig''']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
'''OPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''OPTForCausalLM''',
'''OPTModel''',
'''OPTPreTrainedModel''',
'''OPTForSequenceClassification''',
'''OPTForQuestionAnswering''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_opt"] = ['''TFOPTForCausalLM''', '''TFOPTModel''', '''TFOPTPreTrainedModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
'''FlaxOPTForCausalLM''',
'''FlaxOPTModel''',
'''FlaxOPTPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 348 | 1 |
"""simple docstring"""
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """Create a beta schedule that discretizes the given alpha_t_bar function."""
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class KDPM2DiscreteScheduler(SchedulerMixin, ConfigMixin):
    """Second-order scheduler inspired by the DPM-Solver-2 / k-diffusion KDPM2 sampler."""

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2
    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
def snake_case__ ( self ,__UpperCAmelCase ,__UpperCAmelCase=None ) -> int:
if schedule_timesteps is None:
A__ = self.timesteps
A__ = (schedule_timesteps == timestep).nonzero()
# The sigma index that is taken for the **very** first `step`
# is always the second index (or the last index if there is only 1)
# This way we can ensure we don't accidentally skip a sigma in
# case we start in the middle of the denoising schedule (e.g. for image-to-image)
if len(self._index_counter ) == 0:
A__ = 1 if len(__UpperCAmelCase ) > 1 else 0
else:
A__ = timestep.cpu().item() if torch.is_tensor(__UpperCAmelCase ) else timestep
A__ = self._index_counter[timestep_int]
return indices[pos].item()
    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5
    def scale_model_input(self, sample: torch.FloatTensor, timestep) -> torch.FloatTensor:
        """Scale the denoising model input by `1 / sqrt(sigma**2 + 1)` for the current timestep."""
        step_index = self.index_for_timestep(timestep)

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
        else:
            sigma = self.sigmas_interpol[step_index]

        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample
    def set_timesteps(self, num_inference_steps: int, device=None, num_train_timesteps: Optional[int] = None):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        self.log_sigmas = torch.from_numpy(np.log(sigmas)).to(device)

        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)
        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)

        # interpolate sigmas
        sigmas_interpol = sigmas.log().lerp(sigmas.roll(1).log(), 0.5).exp()

        self.sigmas = torch.cat([sigmas[:1], sigmas[1:].repeat_interleave(2), sigmas[-1:]])
        self.sigmas_interpol = torch.cat(
            [sigmas_interpol[:1], sigmas_interpol[1:].repeat_interleave(2), sigmas_interpol[-1:]]
        )

        if str(device).startswith("mps"):
            # mps does not support float64
            timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32)
        else:
            timesteps = torch.from_numpy(timesteps).to(device)

        # interpolate timesteps
        timesteps_interpol = self.sigma_to_t(sigmas_interpol).to(device, dtype=timesteps.dtype)
        interleaved_timesteps = torch.stack((timesteps_interpol[1:-1, None], timesteps[1:, None]), dim=-1).flatten()

        self.timesteps = torch.cat([timesteps[:1], interleaved_timesteps])

        self.sample = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)
    def sigma_to_t(self, sigma):
        # get log sigma
        log_sigma = sigma.log()

        # get distribution
        dists = log_sigma - self.log_sigmas[:, None]

        # get sigmas range
        low_idx = dists.ge(0).cumsum(dim=0).argmax(dim=0).clamp(max=self.log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = self.log_sigmas[low_idx]
        high = self.log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = w.clamp(0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.view(sigma.shape)
        return t
    @property
    def state_in_first_order(self):
        return self.sample is None
    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_interpol = self.sigmas_interpol[step_index + 1]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / KDPM2's method
            sigma = self.sigmas[step_index - 1]
            sigma_interpol = self.sigmas_interpol[step_index]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_interpol
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            raise NotImplementedError("prediction_type not implemented yet: sample")
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_interpol - sigma_hat

            # store for 2nd order step
            self.sample = sample
        else:
            # DPM-Solver-2
            # 2. Convert to an ODE derivative for 2nd order
            derivative = (sample - pred_original_sample) / sigma_interpol
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            sample = self.sample
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples
    def __len__(self):
        return self.config.num_train_timesteps
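
# Minimal denoising-loop sketch (the `unet` model and tensor shape are illustrative):
#   scheduler = KDPM2DiscreteScheduler()
#   scheduler.set_timesteps(num_inference_steps=25)
#   sample = torch.randn(1, 4, 64, 64) * scheduler.init_noise_sigma
#   for t in scheduler.timesteps:
#       model_input = scheduler.scale_model_input(sample, t)
#       noise_pred = unet(model_input, t).sample
#       sample = scheduler.step(noise_pred, t, sample).prev_sample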
| 154 | """simple docstring"""
import os
def solution():
    """Find the greatest product of four adjacent numbers (in any direction) in the 20x20 grid."""
    with open(os.path.dirname(__file__) + "/grid.txt") as f:
        grid = []
        for _ in range(20):
            grid.append([int(x) for x in f.readline().split()])

    maximum = 0

    # right
    for i in range(20):
        for j in range(17):
            temp = grid[i][j] * grid[i][j + 1] * grid[i][j + 2] * grid[i][j + 3]
            if temp > maximum:
                maximum = temp

    # down
    for i in range(17):
        for j in range(20):
            temp = grid[i][j] * grid[i + 1][j] * grid[i + 2][j] * grid[i + 3][j]
            if temp > maximum:
                maximum = temp

    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = grid[i][j] * grid[i + 1][j + 1] * grid[i + 2][j + 2] * grid[i + 3][j + 3]
            if temp > maximum:
                maximum = temp

    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = grid[i][j] * grid[i + 1][j - 1] * grid[i + 2][j - 2] * grid[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
if __name__ == "__main__":
print(solution())
| 154 | 1 |