"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {
"""configuration_conditional_detr""": [
"""CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ConditionalDetrConfig""",
"""ConditionalDetrOnnxConfig""",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ConditionalDetrFeatureExtractor"""]
lowerCamelCase__ = ["""ConditionalDetrImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_conditional_detr"] = [
"""CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ConditionalDetrForObjectDetection""",
"""ConditionalDetrForSegmentation""",
"""ConditionalDetrModel""",
"""ConditionalDetrPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
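# Illustrative usage of the lazy module above (a sketch, assuming this file is
# transformers/models/conditional_detr/__init__.py): submodules are imported
# only on first attribute access, e.g.
#   from transformers.models.conditional_detr import ConditionalDetrConfig
#   config = ConditionalDetrConfig()  # configuration_conditional_detr loads here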
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gradient_checkpointing = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size, image_size=64, projection_dim=self.text_embedder_hidden_size, intermediate_size=37, num_attention_heads=4, num_channels=3, num_hidden_layers=5, patch_size=1, )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224, do_center_crop=True, do_normalize=True, do_resize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], resample=3, size=224, )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs)
        return model

    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        renderer_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**renderer_kwargs)
        return model

    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp", num_train_timesteps=1024, prediction_type="sample", use_karras_sigmas=True, clip_sample=True, clip_sample_range=1.0, )
        components = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
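    # Note (added): the components dict above mirrors the pipeline constructor —
    # a prior transformer that denoises latents, a CLIP image encoder/processor
    # for conditioning, a NeRF-style renderer, and a Heun scheduler.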
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
    def test_shap_e_img2img(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        # NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2, test_max_difference=test_max_difference, relax_max_difference=relax_max_difference, )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png" )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy" )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)
        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image, generator=generator, guidance_scale=3.0, num_inference_steps=64, frame_size=64, output_type="np", ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Tuple = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
# formula for sum of series
return total
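# Worked check of the closed form (added for illustration): with first_term=1,
# common_diff=1 and num_of_terms=10 the series is 1 + 2 + ... + 10, and the
# formula gives (10 / 2) * (2 * 1 + 9 * 1) = 5 * 11 = 55.0.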
def main():
    print(sum_of_series(1, 1, 10))
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
    load_gpt2,
    recopy_gpt2,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPT2LMHeadModel
def generate_n_pairs(
    context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", ):
    set_seed(3)
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len, data_file, number=size_objective_set, min_len=1026, trim=trim )
    # keeps model same across runs
    set_seed(4)
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    # load pretrained model
    model = load_gpt2("gpt2").to(device)
    print("computing perplexity on objective set")
    orig_perp = compute_perplexity(model, objective_set, context_len).item()
    print("perplexity on objective set:", orig_perp)
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model, orig_perp, context_len, train_data, objective_set, max_steps, device, igf_data_file)
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(
    secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", ):
    set_seed(42)
    # Load pre-trained model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model)
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner, secondary_learner_train_data, max_epochs=secondary_learner_max_epochs, batch_size=secondary_learner_batch_size, eval_freq=100, igf_model_path=igf_model_path, )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(
    model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=None, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    train_sampler = RandomSampler(train_dataset)
    train_dataloader = DataLoader(train_dataset, sampler=train_sampler)
    num_train_epochs = max_steps // (len(train_dataset)) + 1
    global_step = 0
    context = torch.zeros((1, context_len), dtype=torch.long, device=device)
    model, lm_optimizer, lm_scheduler = recopy_model(model, device, max_steps)
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device)
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    test_perp = compute_perplexity(model, test_dataset, context_len)
    test_perps.append(test_perp)
    print("Test perplexity, step", global_step, ":", test_perp)
    for epoch in range(int(num_train_epochs)):
        for step, example in enumerate(train_dataloader):
            torch.cuda.empty_cache()
            start = random.randint(0, example.size(2) - context_len - 1)
            context = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context, labels=context)
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context, dtype=torch.long, device=device).unsqueeze(0) )[0].item()
                observed_qs.append(float(predicted_q))
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
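                # (added note) Contexts whose predicted information gain falls
                # below the threshold are skipped, so only "informative"
                # examples contribute gradients to the language-model update.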
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu()))
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters(), 3.0)
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    test_perp = compute_perplexity(model, test_dataset, context_len)
                    test_perps.append(test_perp)
                    print("Test perplexity, step", global_step, ":", test_perp)
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict(), finetuned_model_name)
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task")
    # Required parameters
    parser.add_argument(
        "--data_dir", default=None, type=str, required=True, help="The input data dir. Should contain data files for WikiText.", )
    parser.add_argument(
        "--model_name_or_path", default=None, type=str, required=True, help="Path to pretrained model or model identifier from huggingface.co/models", )
    parser.add_argument(
        "--data_file", type=str, default=None, help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ), )
    parser.add_argument(
        "--igf_data_file", type=str, default=None, help="A jbl file containing the context and information gain pairs to train secondary learner.", )
    parser.add_argument(
        "--output_dir", default=None, type=str, required=True, help="The output directory where the final fine-tuned model is stored.", )
    parser.add_argument(
        "--tokenizer_name", default=None, type=str, help="Pretrained tokenizer name or path if not the same as model_name", )
    parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
    parser.add_argument(
        "--context_len", default=32, type=int, help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ), )
    parser.add_argument(
        "--size_objective_set", default=100, type=int, help="number of articles that are long enough to be used as our objective set", )
    parser.add_argument(
        "--eval_freq", default=100, type=int, help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps", default=1000, type=int, help="To calculate training epochs")
    parser.add_argument(
        "--secondary_learner_batch_size", default=128, type=int, help="batch size of training data for secondary learner", )
    parser.add_argument(
        "--batch_size", default=16, type=int, help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval", default=10, type=int, help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ), )
    parser.add_argument(
        "--number", default=100, type=int, help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len", default=1026, type=int, help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs", default=15, type=int, help="number of epochs to train secondary learner" )
    parser.add_argument("--trim", default=True, type=bool, help="truncate the example if it exceeds context length")
    parser.add_argument(
        "--threshold", default=1.0, type=float, help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ), )
    parser.add_argument("--finetuned_model_name", default="gpt2_finetuned.pt", type=str, help="finetuned_model_name")
    parser.add_argument(
        "--recopy_model", default=recopy_gpt2, type=str, help="Reset the model to the original pretrained GPT-2 weights after each iteration", )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32, max_steps=10, size_objective_set=100, min_len=1026, trim=True, data_file="data/tokenized_stories_train_wikitext103.jbl", igf_data_file="igf_context_pairs.jbl", )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl")
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data, secondary_learner_max_epochs=15, secondary_learner_batch_size=128, eval_freq=100, igf_model_path="igf_model.pt", )
    # load pretrained gpt2 model
    model = GPT2LMHeadModel.from_pretrained("gpt2")
    set_seed(42)
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32, file="data/tokenized_stories_train_wikitext103.jbl", number=100, min_len=1026, trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model, train_dataset, test_dataset, context_len=32, max_steps=1000, batch_size=16, threshold=1.0, recopy_model=recopy_gpt2, secondary_learner=secondary_learner, eval_interval=10, finetuned_model_name="gpt2_finetuned.pt", )


if __name__ == "__main__":
    main()
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
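# Worked example (added for illustration): for number=13 the inner loop tries
# j=1..3 and settles on answers[13] = 1 + answers[13 - 9] = 1 + answers[4] = 2,
# i.e. 13 = 9 + 4 needs two perfect squares.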
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }
        config.update(**kwargs)
        return config
    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)
    def test_variance(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5
    def test_batch_step_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1
        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)
        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))
        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))
        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3
    def test_full_loop_no_noise(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3
    def test_full_loop_with_v_prediction(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)
        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))
        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3
    def test_custom_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps
        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)
    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg="`custom_timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)
    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `custom_timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)
    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg="`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}", ):
            scheduler.set_timesteps(timesteps=timesteps)
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPT2Tokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class FlaxGPTJModelTester:
    def __init__(
        self, parent, batch_size=14, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=False, use_labels=True, vocab_size=99, hidden_size=32, rotary_dim=4, num_hidden_layers=4, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.rotary_dim = rotary_dim
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.scope = None
        self.bos_token_id = vocab_size - 1
        self.eos_token_id = vocab_size - 1
        self.pad_token_id = vocab_size - 1
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        config = GPTJConfig(
            vocab_size=self.vocab_size, n_embd=self.hidden_size, n_layer=self.num_hidden_layers, n_head=self.num_attention_heads, n_positions=self.max_position_embeddings, use_cache=False, bos_token_id=self.bos_token_id, eos_token_id=self.eos_token_id, pad_token_id=self.pad_token_id, rotary_dim=self.rotary_dim, )
        return (config, input_ids, input_mask)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, input_ids, attention_mask = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "attention_mask": attention_mask}
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        attention_mask = jnp.ones((input_ids.shape[0], max_decoder_length), dtype="i4")
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], attention_mask=attention_mask, past_key_values=outputs_cache.past_key_values, position_ids=position_ids, )
        outputs = model(input_ids)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, input_ids, attention_mask):
        max_decoder_length = 20
        model = model_class_name(config)
        attention_mask_cache = jnp.concatenate(
            [attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]))], axis=-1, )
        past_key_values = model.init_cache(input_ids.shape[0], max_decoder_length)
        position_ids = jnp.broadcast_to(
            jnp.arange(input_ids.shape[-1] - 1)[None, :], (input_ids.shape[0], input_ids.shape[-1] - 1) )
        outputs_cache = model(
            input_ids[:, :-1], attention_mask=attention_mask_cache, past_key_values=past_key_values, position_ids=position_ids, )
        position_ids = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model(
            input_ids[:, -1:], past_key_values=outputs_cache.past_key_values, attention_mask=attention_mask_cache, position_ids=position_ids, )
        outputs = model(input_ids, attention_mask=attention_mask)
        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask )
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained("gpt2", pad_token="<|endoftext|>", padding_side="left")
        inputs = tokenizer(["Hello this is a long string", "Hey"], return_tensors="np", padding=True, truncation=True)
        model = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B")
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id
        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs["input_ids"], attention_mask=inputs["attention_mask"], pad_token_id=tokenizer.pad_token_id ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)
        expected_string = [
            "Hello this is a long string of text.\n\nI'm trying to get the text of the",
            "Hey, I'm a little late to the party. I'm going to",
        ]
        self.assertListEqual(output_string, expected_string)
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)
                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), "Output lengths differ between Flax and PyTorch" )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}
                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)
                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)
                batch_size, seq_length = pt_inputs["input_ids"].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs["attention_mask"][batch_idx, :start_index] = 0
                    pt_inputs["attention_mask"][batch_idx, start_index:] = 1
                    prepared_inputs_dict["attention_mask"][batch_idx, :start_index] = 0
                    prepared_inputs_dict["attention_mask"][batch_idx, start_index:] = 1
                # make sure weights are tied in PyTorch
                pt_model.tie_weights()
                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()
                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), "Output lengths differ between Flax and PyTorch")
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)
                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()
                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), "Output lengths differ between Flax and PyTorch" )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("EleutherAI/gpt-j-6B")
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs)
"""simple docstring"""
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
    },
    """merges_file""": {
        """allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"""allegro/herbert-base-cased""": 514}

PRETRAINED_INIT_CONFIGURATION = {}
class HerbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = HerbertTokenizer

    def __init__(
        self, vocab_file=None, merges_file=None, tokenizer_file=None, cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", sep_token="</s>", **kwargs, ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, cls_token=cls_token, unk_token=unk_token, pad_token=pad_token, mask_token=mask_token, sep_token=sep_token, **kwargs, )
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep
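    # Illustrative token layout (added; assumes the standard HerBERT specials):
    #   single sequence:  <s> A </s>
    #   pair of sequences: <s> A </s> B </s>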
    def get_special_tokens_mask(self, token_ids_0, token_ids_1=None, already_has_special_tokens=False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=already_has_special_tokens )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]
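    # Example (added): for len(A) == 2 and len(B) == 3 the method returns
    # [0, 0, 0, 0] + [1, 1, 1, 1]: zeros cover "<s> A </s>", ones cover "B </s>".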
    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
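# Illustrative usage (added; assumes the Hub checkpoint is reachable):
#   tokenizer = HerbertTokenizerFast.from_pretrained("allegro/herbert-base-cased")
#   tokenizer.save_pretrained("./herbert")  # invokes save_vocabulary internally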
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None, ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        inputs = [input_ids, input_mask]
        result = model(inputs)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))
    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False
    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
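

# Hedged illustration (added; not part of the original test file). The slow
# test above round-trips a model through the Keras SavedModel format. This
# self-contained sketch shows the same save/load pattern with a toy Dense
# model standing in for ConvBERT, assuming a TF 2.x / tf.keras environment.
import os
import tempfile

import tensorflow as tf

toy_model = tf.keras.Sequential([tf.keras.layers.Dense(4, input_shape=(8,))])
sample = tf.random.uniform((2, 8))
with tempfile.TemporaryDirectory() as tmpdirname:
    path = os.path.join(tmpdirname, "saved_model", "1")
    toy_model.save(path, save_format="tf")  # write the SavedModel directory
    restored = tf.keras.models.load_model(path)  # rebuild the graph from disk
    assert restored(sample).shape == (2, 4)
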
"""simple docstring"""
from pathlib import Path
import cv2
import numpy as np
from matplotlib import pyplot as plt


def get_rotation(img, pts1, pts2, rows, cols):
    # build the 2x3 affine matrix mapping pts1 onto pts2 and apply it
    matrix = cv2.getAffineTransform(pts1, pts2)
    return cv2.warpAffine(img, matrix, (cols, rows))  # dsize is (width, height)


if __name__ == "__main__":
    # read original image
    image = cv2.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different point triples to rotate the image (any src -> dst pairing
    # of these triples produces a valid demo rotation)
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, """gray""")
plt.title(titles[i])
plt.axis("""off""")
plt.subplots_adjust(left=0.0, bottom=0.0_5, right=1.0, top=0.9_5)
plt.show() | 86 |
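

# Hedged illustration (added; not part of the original script). An affine
# transform is fully determined by three point correspondences:
# cv2.getAffineTransform returns the 2x3 matrix M with dst_i = M @ [x_i, y_i, 1].
import cv2
import numpy as np

src = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
dst = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
M = cv2.getAffineTransform(src, dst)  # shape (2, 3)
src_h = np.hstack([src, np.ones((3, 1), np.float32)])  # homogeneous coordinates
assert np.allclose(src_h @ M.T, dst, atol=1e-3)
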
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
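

# Hedged sketch (added; not part of the original test file) of the
# "shortest edge" resize rule the expected-shape helper above encodes:
# the smaller image side is scaled to `shortest_edge` and the other side
# keeps the aspect ratio (the `longest_edge` cap is ignored here for brevity).
def expected_hw(w: int, h: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge  # (height, width)
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge


assert expected_hw(30, 60) == (36, 18)
assert expected_hw(60, 30) == (18, 36)
assert expected_hw(40, 40) == (18, 18)
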
"""simple docstring"""
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
KEYS_TO_MODIFY_MAPPING = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
processor = AutoFeatureExtractor.from_pretrained("laion/clap-htsat-unfused", truncation="rand_trunc")


def init_clap(checkpoint_path, enable_fusion=False):
    model, model_cfg = create_model(
        'HTSAT-tiny',
        'roberta',
        checkpoint_path,
        precision='fp32',
        device='cuda:0' if torch.cuda.is_available() else 'cpu',
        enable_fusion=enable_fusion,
        fusion_type='aff_2d' if enable_fusion else None,
    )
    return model, model_cfg


def rename_state_dict(state_dict):
    model_state_dict = {}
    sequential_layers_pattern = r'.*sequential.(\d+).*'
    text_projection_pattern = r'.*_projection.(\d+).*'
    for key, value in state_dict.items():
        # check if any key needs to be modified
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)
        if re.match(sequential_layers_pattern, key):
            # replace sequential layers with list
            sequential_layer = re.match(sequential_layers_pattern, key).group(1)
            key = key.replace(f"sequential.{sequential_layer}.", f"layers.{int(sequential_layer)//3}.linear.")
        elif re.match(text_projection_pattern, key):
            projecton_layer = int(re.match(text_projection_pattern, key).group(1))
            # Because in CLAP they use `nn.Sequential`...
            transformers_projection_layer = 1 if projecton_layer == 0 else 2
            key = key.replace(f"_projection.{projecton_layer}.", f"_projection.linear{transformers_projection_layer}.")
        if "audio" in key and "qkv" in key:  # fixed: `"audio" and ...` never actually tested for "audio"
            # split qkv into query, key and value
            mixed_qkv = value
            qkv_dim = mixed_qkv.size(0) // 3
            query_layer = mixed_qkv[:qkv_dim]
            key_layer = mixed_qkv[qkv_dim : qkv_dim * 2]
            value_layer = mixed_qkv[qkv_dim * 2 :]
            model_state_dict[key.replace("qkv", "query")] = query_layer
            model_state_dict[key.replace("qkv", "key")] = key_layer
            model_state_dict[key.replace("qkv", "value")] = value_layer
        else:
            model_state_dict[key] = value
    return model_state_dict


def convert_clap_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path, enable_fusion=False):
    clap_model, clap_model_cfg = init_clap(checkpoint_path, enable_fusion=enable_fusion)
    clap_model.eval()
    state_dict = clap_model.state_dict()
    state_dict = rename_state_dict(state_dict)
    transformers_config = ClapConfig()
    transformers_config.audio_config.enable_fusion = enable_fusion
    model = ClapModel(transformers_config)
    # ignore the spectrogram embedding layer
    model.load_state_dict(state_dict, strict=False)
    model.save_pretrained(pytorch_dump_folder_path)
    transformers_config.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
    args = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion) | 86 |
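

# Hedged illustration (added; not part of the conversion script) of the
# fused-qkv split performed in rename_state_dict above: a (3*d, d) weight
# is cut into equal thirds along dim 0: query first, then key, then value.
import torch

d = 4
mixed_qkv = torch.arange(3 * d * d, dtype=torch.float32).reshape(3 * d, d)
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (d, d)
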
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    return 1 / (1 + np.exp(-vector))


def swish(vector):
    # SiLU / swish activation: x * sigmoid(x)
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
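

# Quick numeric sanity check (added; hedged illustration of the two
# activations above): sigmoid maps into (0, 1); swish(x) = x * sigmoid(x)
# is near 0 for large negative x and approaches x for large positive x.
import numpy as np

x = np.array([-1.0, 0.0, 1.0])
print(1 / (1 + np.exp(-x)))  # sigmoid: approx [0.2689 0.5    0.7311]
print(x / (1 + np.exp(-x)))  # swish:   approx [-0.2689 0.     0.7311]
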
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class MobileBertModelTester:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)


TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 |
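

# Hedged sketch (added; not part of the original test file) of the
# ratio-based tolerance check used above: when outputs span ~8 orders of
# magnitude, compare expected/actual against 1 within TOLERANCE rather
# than using an absolute difference.
import torch

TOLERANCE = 1e-3
expected = torch.tensor([2.5e7, 3.9, -0.17])
actual = expected * (1 + 5e-4)  # simulate a 0.05% relative deviation
ratio = expected / actual
assert torch.all(ratio >= 1 - TOLERANCE).item()
assert torch.all(ratio <= 1 + TOLERANCE).item()
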
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) : Optional[Any] = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
"""simple docstring"""
def cocktail_shaker_sort(unsorted):
    for i in range(len(unsorted) - 1, 0, -1):
        swapped = False
        # backward pass: bubble the smallest remaining value toward index 0
        for j in range(i, 0, -1):
            if unsorted[j] < unsorted[j - 1]:
                unsorted[j - 1], unsorted[j] = unsorted[j], unsorted[j - 1]
                swapped = True
        # forward pass: bubble the largest remaining value toward the end
        for j in range(i):
            if unsorted[j] > unsorted[j + 1]:
                unsorted[j + 1], unsorted[j] = unsorted[j], unsorted[j + 1]
                swapped = True
        if not swapped:
            break
    return unsorted
if __name__ == "__main__":
import doctest
doctest.testmod()
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
lowerCamelCase__ = [int(item) for item in user_input.split(""",""")]
print(f'{cocktail_shaker_sort(unsorted) = }') | 86 |
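# Quick sanity checks for the sort above; inputs are arbitrary. The `swapped`
# flag gives an early exit on already-sorted input, so the best case is O(n)
# while the worst case remains O(n^2).
assert cocktail_shaker_sort([4, 5, 2, 1, 2]) == [1, 2, 2, 4, 5]
assert cocktail_shaker_sort([-4, 5, 0, 1, -2]) == sorted([-4, 5, 0, 1, -2])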
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
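# A hedged, standalone illustration of the tag parsing implemented above; the
# sample string is invented and the regexes mirror the ones used in the method.
import re
tokens = "<s_name>Latte</s_name><s_price>4.50</s_price>"
start = re.search(r"<s_(.*?)>", tokens)
key = start.group(1)
content = re.search(rf"{re.escape(start.group())}(.*?){re.escape(f'</s_{key}>')}", tokens)
print({key: content.group(1)})  # {'name': 'Latte'}
# On the full method, nested tags recurse into nested dicts (output shape is
# illustrative):
#   "<s_menu><s_name>Latte</s_name></s_menu>"  ->  {"menu": {"name": "Latte"}}
# and leaf values separated by <sep/> become a list:
#   "<s_tags>hot<sep/>vegan</s_tags>"  ->  {"tags": ["hot", "vegan"]}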
"""simple docstring"""
lowerCamelCase__ = """0.18.2"""
from .configuration_utils import ConfigMixin
from .utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_inflect_available,
is_invisible_watermark_available,
is_k_diffusion_available,
is_k_diffusion_version,
is_librosa_available,
is_note_seq_available,
is_onnx_available,
is_scipy_available,
is_torch_available,
is_torchsde_available,
is_transformers_available,
is_transformers_version,
is_unidecode_available,
logging,
)
try:
if not is_onnx_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_onnx_objects import * # noqa F403
else:
from .pipelines import OnnxRuntimeModel
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_pt_objects import * # noqa F403
else:
from .models import (
AutoencoderKL,
ControlNetModel,
ModelMixin,
PriorTransformer,
TaFilmDecoder,
TransformeraDModel,
UNetaDModel,
UNetaDConditionModel,
UNetaDModel,
UNetaDConditionModel,
VQModel,
)
from .optimization import (
get_constant_schedule,
get_constant_schedule_with_warmup,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
get_scheduler,
)
from .pipelines import (
AudioPipelineOutput,
ConsistencyModelPipeline,
DanceDiffusionPipeline,
DDIMPipeline,
DDPMPipeline,
DiffusionPipeline,
DiTPipeline,
ImagePipelineOutput,
KarrasVePipeline,
LDMPipeline,
LDMSuperResolutionPipeline,
PNDMPipeline,
RePaintPipeline,
ScoreSdeVePipeline,
)
from .schedulers import (
CMStochasticIterativeScheduler,
DDIMInverseScheduler,
DDIMParallelScheduler,
DDIMScheduler,
DDPMParallelScheduler,
DDPMScheduler,
DEISMultistepScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
HeunDiscreteScheduler,
IPNDMScheduler,
KarrasVeScheduler,
KDPMaAncestralDiscreteScheduler,
KDPMaDiscreteScheduler,
PNDMScheduler,
RePaintScheduler,
SchedulerMixin,
ScoreSdeVeScheduler,
UnCLIPScheduler,
UniPCMultistepScheduler,
VQDiffusionScheduler,
)
from .training_utils import EMAModel
try:
if not (is_torch_available() and is_scipy_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_scipy_objects import * # noqa F403
else:
from .schedulers import LMSDiscreteScheduler
try:
if not (is_torch_available() and is_torchsde_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_torchsde_objects import * # noqa F403
else:
from .schedulers import DPMSolverSDEScheduler
try:
if not (is_torch_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
AltDiffusionImgaImgPipeline,
AltDiffusionPipeline,
AudioLDMPipeline,
CycleDiffusionPipeline,
IFImgaImgPipeline,
IFImgaImgSuperResolutionPipeline,
IFInpaintingPipeline,
IFInpaintingSuperResolutionPipeline,
IFPipeline,
IFSuperResolutionPipeline,
ImageTextPipelineOutput,
KandinskyImgaImgPipeline,
KandinskyInpaintPipeline,
KandinskyPipeline,
KandinskyPriorPipeline,
KandinskyVaaControlnetImgaImgPipeline,
KandinskyVaaControlnetPipeline,
KandinskyVaaImgaImgPipeline,
KandinskyVaaInpaintPipeline,
KandinskyVaaPipeline,
KandinskyVaaPriorEmbaEmbPipeline,
KandinskyVaaPriorPipeline,
LDMTextToImagePipeline,
PaintByExamplePipeline,
SemanticStableDiffusionPipeline,
ShapEImgaImgPipeline,
ShapEPipeline,
StableDiffusionAttendAndExcitePipeline,
StableDiffusionControlNetImgaImgPipeline,
StableDiffusionControlNetInpaintPipeline,
StableDiffusionControlNetPipeline,
StableDiffusionDepthaImgPipeline,
StableDiffusionDiffEditPipeline,
StableDiffusionImageVariationPipeline,
StableDiffusionImgaImgPipeline,
StableDiffusionInpaintPipeline,
StableDiffusionInpaintPipelineLegacy,
StableDiffusionInstructPixaPixPipeline,
StableDiffusionLatentUpscalePipeline,
StableDiffusionLDMaDPipeline,
StableDiffusionModelEditingPipeline,
StableDiffusionPanoramaPipeline,
StableDiffusionParadigmsPipeline,
StableDiffusionPipeline,
StableDiffusionPipelineSafe,
StableDiffusionPixaPixZeroPipeline,
StableDiffusionSAGPipeline,
StableDiffusionUpscalePipeline,
StableUnCLIPImgaImgPipeline,
StableUnCLIPPipeline,
TextToVideoSDPipeline,
TextToVideoZeroPipeline,
UnCLIPImageVariationPipeline,
UnCLIPPipeline,
UniDiffuserModel,
UniDiffuserPipeline,
UniDiffuserTextDecoder,
VersatileDiffusionDualGuidedPipeline,
VersatileDiffusionImageVariationPipeline,
VersatileDiffusionPipeline,
VersatileDiffusionTextToImagePipeline,
VideoToVideoSDPipeline,
VQDiffusionPipeline,
)
try:
if not (is_torch_available() and is_transformers_available() and is_invisible_watermark_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_invisible_watermark_objects import * # noqa F403
else:
from .pipelines import StableDiffusionXLImgaImgPipeline, StableDiffusionXLPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_k_diffusion_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_k_diffusion_objects import * # noqa F403
else:
from .pipelines import StableDiffusionKDiffusionPipeline
try:
if not (is_torch_available() and is_transformers_available() and is_onnx_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_transformers_and_onnx_objects import * # noqa F403
else:
from .pipelines import (
OnnxStableDiffusionImgaImgPipeline,
OnnxStableDiffusionInpaintPipeline,
OnnxStableDiffusionInpaintPipelineLegacy,
OnnxStableDiffusionPipeline,
OnnxStableDiffusionUpscalePipeline,
StableDiffusionOnnxPipeline,
)
try:
if not (is_torch_available() and is_librosa_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_torch_and_librosa_objects import * # noqa F403
else:
from .pipelines import AudioDiffusionPipeline, Mel
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .pipelines import SpectrogramDiffusionPipeline
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_objects import * # noqa F403
else:
from .models.controlnet_flax import FlaxControlNetModel
from .models.modeling_flax_utils import FlaxModelMixin
from .models.unet_ad_condition_flax import FlaxUNetaDConditionModel
from .models.vae_flax import FlaxAutoencoderKL
from .pipelines import FlaxDiffusionPipeline
from .schedulers import (
FlaxDDIMScheduler,
FlaxDDPMScheduler,
FlaxDPMSolverMultistepScheduler,
FlaxKarrasVeScheduler,
FlaxLMSDiscreteScheduler,
FlaxPNDMScheduler,
FlaxSchedulerMixin,
FlaxScoreSdeVeScheduler,
)
try:
if not (is_flax_available() and is_transformers_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_flax_and_transformers_objects import * # noqa F403
else:
from .pipelines import (
FlaxStableDiffusionControlNetPipeline,
FlaxStableDiffusionImgaImgPipeline,
FlaxStableDiffusionInpaintPipeline,
FlaxStableDiffusionPipeline,
)
try:
if not (is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from .utils.dummy_note_seq_objects import * # noqa F403
else:
from .pipelines import MidiProcessor | 86 |
"""simple docstring"""
def sum_digits(num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n = 100 ):
    # Numerators of successive convergents of the continued fraction of e.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        # Partial quotients of e run 1, 1, 2k, repeating: 2k at every third term.
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
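# A small hand check of the recurrence above, using the known continued
# fraction e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: the convergent numerators run
# 2, 3, 8, 11, 19, 87, ..., 1457 (10th), so:
assert solution(6) == 15   # sum_digits(87)
assert solution(10) == 17  # sum_digits(1457)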
"""simple docstring"""
import os
import unittest
from transformers.models.phobert.tokenization_phobert import VOCAB_FILES_NAMES, PhobertTokenizer
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Dict = PhobertTokenizer
A_ : int = False
def __lowerCamelCase ( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
__lowerCAmelCase : List[str] = ['T@@', 'i', 'I', 'R@@', 'r', 'e@@']
__lowerCAmelCase : List[Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : List[Any] = ['#version: 0.2', 'l à</w>']
__lowerCAmelCase : Optional[Any] = {'unk_token': '<unk>'}
__lowerCAmelCase : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : str = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
for token in vocab_tokens:
fp.write(f"{token} {vocab_tokens[token]}\n" )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
kwargs.update(self.special_tokens_map )
return PhobertTokenizer.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = 'Tôi là VinAI Research'
__lowerCAmelCase : Tuple = 'T<unk> i <unk> <unk> <unk> <unk> <unk> <unk> I Re<unk> e<unk> <unk> <unk> <unk>'
return input_text, output_text
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = PhobertTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
__lowerCAmelCase : List[str] = 'Tôi là VinAI Research'
__lowerCAmelCase : List[str] = 'T@@ ô@@ i l@@ à V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h'.split()
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
print(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokens + [tokenizer.unk_token]
__lowerCAmelCase : List[Any] = [4, 3, 5, 3, 3, 3, 3, 3, 3, 6, 7, 9, 3, 9, 3, 3, 3, 3, 3]
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
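# A hedged usage sketch for the configuration defined above; the keyword names
# follow the attribute assignments in __init__, and the printed values assume
# the defaults shown in the signature. Requires the `transformers` package.
from transformers import MarkupLMConfig
config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.tag_pad_id, config.subs_pad_id)  # 216 1001 with the defaults above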
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""SCUT-DLVCLab/lilt-roberta-en-base""": (
"""https://huggingface.co/SCUT-DLVCLab/lilt-roberta-en-base/resolve/main/config.json"""
),
}
class A__ ( _lowerCamelCase):
A_ : Union[str, Any] = 'lilt'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=10_24 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(pad_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : Optional[int] = num_hidden_layers
__lowerCAmelCase : str = num_attention_heads
__lowerCAmelCase : Tuple = hidden_act
__lowerCAmelCase : Any = intermediate_size
__lowerCAmelCase : List[Any] = hidden_dropout_prob
__lowerCAmelCase : Optional[Any] = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : List[str] = type_vocab_size
__lowerCAmelCase : str = initializer_range
__lowerCAmelCase : Tuple = layer_norm_eps
__lowerCAmelCase : int = position_embedding_type
__lowerCAmelCase : Dict = classifier_dropout
__lowerCAmelCase : str = channel_shrink_ratio
__lowerCAmelCase : Dict = max_ad_position_embeddings | 86 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
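# A minimal sketch of the "*" wildcard substitution used by the weight-renaming
# loop above; the fairseq key below is invented.
mapped_key = "encoder.layers.*.attention.k_proj"
fairseq_name = "encoder.layers.7.self_attn.k_proj"
# The script extracts the layer index from the fairseq name and splices it into
# the Transformers-side template in place of "*".
layer_index = fairseq_name.split("self_attn.k_proj")[0].split(".")[-2]
print(mapped_key.replace("*", layer_index))  # encoder.layers.7.attention.k_proj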
"""simple docstring"""
from __future__ import annotations
def prime_factors(n ):
    i = 2
    factors = []
    # Trial division: strip out each prime factor while i * i <= n.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i )
    if n > 1:
        # Whatever remains above 1 is itself prime.
        factors.append(n )
    return factors
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
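# Quick checks for the trial-division factorization above; inputs are arbitrary.
assert prime_factors(100) == [2, 2, 5, 5]
assert prime_factors(97) == [97]
assert prime_factors(2 * 3 * 5 * 7) == [2, 3, 5, 7]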
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
        return [
            {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
            {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
            {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
        ]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
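# A small sketch of how SageMaker consumes the metric definitions above: each
# "Regex" is run over the training log and group(1) becomes the metric value.
# The log line is invented.
import re
metric = {"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"}
log_line = "eval_accuracy = 0.8312"
match = re.search(metric["Regex"], log_line)
if match:
    print(metric["Name"], float(match.group(1)))  # eval_accuracy 0.8312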
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {
"""configuration_data2vec_audio""": ["""DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Data2VecAudioConfig"""],
"""configuration_data2vec_text""": [
"""DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecTextConfig""",
"""Data2VecTextOnnxConfig""",
],
"""configuration_data2vec_vision""": [
"""DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""Data2VecVisionConfig""",
"""Data2VecVisionOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecAudioForAudioFrameClassification""",
"""Data2VecAudioForCTC""",
"""Data2VecAudioForSequenceClassification""",
"""Data2VecAudioForXVector""",
"""Data2VecAudioModel""",
"""Data2VecAudioPreTrainedModel""",
]
lowerCamelCase__ = [
"""DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecTextForCausalLM""",
"""Data2VecTextForMaskedLM""",
"""Data2VecTextForMultipleChoice""",
"""Data2VecTextForQuestionAnswering""",
"""Data2VecTextForSequenceClassification""",
"""Data2VecTextForTokenClassification""",
"""Data2VecTextModel""",
"""Data2VecTextPreTrainedModel""",
]
lowerCamelCase__ = [
"""DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Data2VecVisionForImageClassification""",
"""Data2VecVisionForMaskedImageModeling""",
"""Data2VecVisionForSemanticSegmentation""",
"""Data2VecVisionModel""",
"""Data2VecVisionPreTrainedModel""",
]
if is_tf_available():
lowerCamelCase__ = [
"""TFData2VecVisionForImageClassification""",
"""TFData2VecVisionForSemanticSegmentation""",
"""TFData2VecVisionModel""",
"""TFData2VecVisionPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_dataavec_audio import DATA2VEC_AUDIO_PRETRAINED_CONFIG_ARCHIVE_MAP, DataaVecAudioConfig
from .configuration_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecTextConfig,
DataaVecTextOnnxConfig,
)
from .configuration_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_CONFIG_ARCHIVE_MAP,
DataaVecVisionConfig,
DataaVecVisionOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_dataavec_audio import (
DATA2VEC_AUDIO_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecAudioForAudioFrameClassification,
DataaVecAudioForCTC,
DataaVecAudioForSequenceClassification,
DataaVecAudioForXVector,
DataaVecAudioModel,
DataaVecAudioPreTrainedModel,
)
from .modeling_dataavec_text import (
DATA2VEC_TEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecTextForCausalLM,
DataaVecTextForMaskedLM,
DataaVecTextForMultipleChoice,
DataaVecTextForQuestionAnswering,
DataaVecTextForSequenceClassification,
DataaVecTextForTokenClassification,
DataaVecTextModel,
DataaVecTextPreTrainedModel,
)
from .modeling_dataavec_vision import (
DATA2VEC_VISION_PRETRAINED_MODEL_ARCHIVE_LIST,
DataaVecVisionForImageClassification,
DataaVecVisionForMaskedImageModeling,
DataaVecVisionForSemanticSegmentation,
DataaVecVisionModel,
DataaVecVisionPreTrainedModel,
)
if is_tf_available():
from .modeling_tf_dataavec_vision import (
TFDataaVecVisionForImageClassification,
TFDataaVecVisionForSemanticSegmentation,
TFDataaVecVisionModel,
TFDataaVecVisionPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
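# A compact illustration of the Manhattan-distance heuristic driving the search
# above: greedy best-first always expands the open node with the smallest
# heuristic value, ignoring the cost already paid. Coordinates are arbitrary.
def manhattan(pos, goal_pos):
    return abs(pos[0] - goal_pos[0]) + abs(pos[1] - goal_pos[1])
frontier = [(0, 0), (3, 1), (5, 5)]
target = (6, 6)
print(sorted(frontier, key=lambda p: manhattan(p, target))[0])  # (5, 5)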
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 |
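# The cross-framework round trip the tests above exercise, as a standalone
# sketch; "bert-base-uncased" is the checkpoint the tests themselves use.
# Running this needs network access plus both PyTorch and TensorFlow installed.
from transformers import AutoModel, TFAutoModel
tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF model
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)  # TF weights -> PT model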
"""simple docstring"""
def molarity_to_normality(nfactor , moles , volume ):
    # Normality = molarity * n-factor, with molarity = moles / volume.
    return round(float(moles / volume ) * nfactor )
def moles_to_pressure(volume , moles , temperature ):
    # Ideal gas law PV = nRT rearranged for P, with R = 0.0821 L*atm/(mol*K).
    return round(float((moles * 0.0821 * temperature) / (volume) ) )
def moles_to_volume(pressure , moles , temperature ):
    # Ideal gas law rearranged for V.
    return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def pressure_and_volume_to_temperature(pressure , moles , volume ):
    # Ideal gas law rearranged for T.
    return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
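# Worked examples for the helpers above, all rearrangements of the ideal gas
# law PV = nRT with R = 0.0821 L*atm/(mol*K); input values are arbitrary.
# 2 mol at 300 K in 10 L: P = nRT / V = 2 * 0.0821 * 300 / 10 = 4.926, rounds to 5.
assert moles_to_pressure(volume=10, moles=2, temperature=300) == 5
# Normality = molarity * n-factor: (4 mol / 2 L) * 2 = 4.
assert molarity_to_normality(nfactor=2, moles=4, volume=2) == 4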
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP | 86 |
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
                # Remove the PADDING prompt from the sequence if an XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 1 |
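# Minimal usage sketch for the text-generation pipeline above (illustrative only,
# not part of the original module; assumes the standard `transformers.pipeline`
# factory and the public `gpt2` checkpoint):
#
#     from transformers import pipeline
#     generator = pipeline('text-generation', model='gpt2')
#     generator('Hello, I am', max_new_tokens=5, return_full_text=False)
#     # -> [{'generated_text': '...'}]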
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class A__ :
@staticmethod
def __lowerCamelCase ( *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class A__ ( unittest.TestCase):
A_ : Optional[Any] = MODEL_FOR_OBJECT_DETECTION_MAPPING
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = object_detector('./tests/fixtures/tests_samples/COCO/000000039769.png' , threshold=0.0 )
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
import datasets
__lowerCAmelCase : Tuple = datasets.load_dataset('hf-internal-testing/fixtures_image_utils' , 'image' , split='test' )
__lowerCAmelCase : int = [
Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' ),
'http://images.cocodataset.org/val2017/000000039769.jpg',
# RGBA
dataset[0]['file'],
# LA
dataset[1]['file'],
# L
dataset[2]['file'],
]
__lowerCAmelCase : Union[str, Any] = object_detector(_SCREAMING_SNAKE_CASE , threshold=0.0 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) )
for outputs in batch_outputs:
self.assertGreater(len(_SCREAMING_SNAKE_CASE ) , 0 )
for detected_object in outputs:
self.assertEqual(
_SCREAMING_SNAKE_CASE , {
'score': ANY(_SCREAMING_SNAKE_CASE ),
'label': ANY(_SCREAMING_SNAKE_CASE ),
'box': {'xmin': ANY(_SCREAMING_SNAKE_CASE ), 'ymin': ANY(_SCREAMING_SNAKE_CASE ), 'xmax': ANY(_SCREAMING_SNAKE_CASE ), 'ymax': ANY(_SCREAMING_SNAKE_CASE )},
} , )
@require_tf
@unittest.skip('Object detection not implemented in TF' )
def __lowerCamelCase ( self ):
pass
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = 'hf-internal-testing/tiny-detr-mobilenetsv3'
__lowerCAmelCase : List[str] = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=0.0 )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
] , )
__lowerCAmelCase : Dict = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
[
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
{'score': 0.3376, 'label': 'LABEL_0', 'box': {'xmin': 1_59, 'ymin': 1_20, 'xmax': 4_80, 'ymax': 3_59}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'facebook/detr-resnet-50'
__lowerCAmelCase : List[str] = AutoModelForObjectDetection.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoFeatureExtractor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = ObjectDetectionPipeline(model=_SCREAMING_SNAKE_CASE , feature_extractor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCAmelCase : Any = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'facebook/detr-resnet-50'
__lowerCAmelCase : Any = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
__lowerCAmelCase : List[Any] = object_detector(
[
'http://images.cocodataset.org/val2017/000000039769.jpg',
'http://images.cocodataset.org/val2017/000000039769.jpg',
] )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
[
{'score': 0.9982, 'label': 'remote', 'box': {'xmin': 40, 'ymin': 70, 'xmax': 1_75, 'ymax': 1_17}},
{'score': 0.9960, 'label': 'remote', 'box': {'xmin': 3_33, 'ymin': 72, 'xmax': 3_68, 'ymax': 1_87}},
{'score': 0.9955, 'label': 'couch', 'box': {'xmin': 0, 'ymin': 1, 'xmax': 6_39, 'ymax': 4_73}},
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
],
] , )
@require_torch
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = 0.9985
__lowerCAmelCase : List[str] = 'facebook/detr-resnet-50'
__lowerCAmelCase : Tuple = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector('http://images.cocodataset.org/val2017/000000039769.jpg' , threshold=_SCREAMING_SNAKE_CASE )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9988, 'label': 'cat', 'box': {'xmin': 13, 'ymin': 52, 'xmax': 3_14, 'ymax': 4_70}},
{'score': 0.9987, 'label': 'cat', 'box': {'xmin': 3_45, 'ymin': 23, 'xmax': 6_40, 'ymax': 3_68}},
] , )
@require_torch
@require_pytesseract
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'Narsil/layoutlmv3-finetuned-funsd'
__lowerCAmelCase : Optional[Any] = 0.9993
__lowerCAmelCase : Tuple = pipeline('object-detection' , model=_SCREAMING_SNAKE_CASE , threshold=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = object_detector(
'https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png' )
self.assertEqual(
nested_simplify(_SCREAMING_SNAKE_CASE , decimals=4 ) , [
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
{'score': 0.9993, 'label': 'I-ANSWER', 'box': {'xmin': 2_94, 'ymin': 2_54, 'xmax': 3_43, 'ymax': 2_64}},
] , ) | 86 |
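# Standalone sketch of the API exercised by the tests above (downloads weights;
# the checkpoint and threshold are taken from the slow threshold test):
#
#     from transformers import pipeline
#     detector = pipeline('object-detection', model='facebook/detr-resnet-50')
#     detector('http://images.cocodataset.org/val2017/000000039769.jpg', threshold=0.9985)
#     # -> only the two 'cat' detections score above this threshold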
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
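# Example (assuming the function above keeps its upstream name `bisect_left`):
# bisect_left([0, 5, 7, 10, 15], 6) returns 2, the first index at which 6 can be
# inserted while keeping the collection sorted.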
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
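# Example: with [0, 5, 7, 10, 15] and item 5 this right-biased variant returns 2,
# the index just past the existing 5, whereas the left-biased variant returns 1.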
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
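# Example (assuming the upstream name `binary_search`): searching [0, 5, 7, 10, 15]
# for 10 probes midpoints 7 and then 10 and returns index 3; an absent value such
# as 6 returns None.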
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
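# Example: the standard-library variant above locates 15 at index 4 in
# [0, 5, 7, 10, 15] via bisect.bisect_left and returns None for absent values.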
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_mctct""": ["""MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MCTCTConfig"""],
"""feature_extraction_mctct""": ["""MCTCTFeatureExtractor"""],
"""processing_mctct""": ["""MCTCTProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MCTCTForCTC""",
"""MCTCTModel""",
"""MCTCTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
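# Note: the `_LazyModule` assignment above mirrors the lazy-import pattern used
# throughout `transformers` — names listed in `_import_structure` (including the
# torch-only model classes) are only imported on first attribute access.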
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
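# The cross-framework loading pattern exercised above, shown standalone (a sketch
# that downloads weights):
#
#     from transformers import TFAutoModel
#     model = TFAutoModel.from_pretrained('bert-base-uncased', from_pt=True)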
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if days_between_payments <= 0:
raise ValueError('days_between_payments must be > 0' )
if daily_interest_rate < 0:
raise ValueError('daily_interest_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * daily_interest_rate * days_between_payments
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
if number_of_compounding_periods <= 0:
raise ValueError('number_of_compounding_periods must be > 0' )
if nominal_annual_interest_rate_percentage < 0:
raise ValueError('nominal_annual_interest_rate_percentage must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return principal * (
(1 + nominal_annual_interest_rate_percentage) ** number_of_compounding_periods
- 1
)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , ):
if number_of_years <= 0:
raise ValueError('number_of_years must be > 0' )
if nominal_annual_percentage_rate < 0:
raise ValueError('nominal_annual_percentage_rate must be >= 0' )
if principal <= 0:
raise ValueError('principal must be > 0' )
return compound_interest(
_UpperCamelCase , nominal_annual_percentage_rate / 365 , number_of_years * 365 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
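# Worked examples for the three helpers above (simple interest, compound interest,
# and APR-style daily compounding, in definition order):
# - simple interest: 1000 * 0.005 * 60 = 300.0 for a 1000 principal at a 0.5%
#   daily rate over 60 days between payments
# - the APR helper delegates to the compound-interest helper, passing
#   nominal_annual_percentage_rate / 365 and number_of_years * 365 periods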
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
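# Worked example (assuming the helper above keeps its upstream name `get_new_h_w`):
# get_new_h_w(768, 768, 8) returns (96, 96), the movq latent grid for a 768px image,
# while an off-grid height of 770 rounds up to (770 // 64 + 1) * 8 = 104.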
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
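                # A worked instance of the classifier-free guidance update above: with
                # guidance_scale = 4.0 the combined prediction is u + 4.0 * (t - u) = 4t - 3u,
                # amplifying the text-conditioned prediction t away from the unconditional u.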
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_torch_available,
)
lowerCamelCase__ = {
"""configuration_speecht5""": [
"""SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP""",
"""SpeechT5Config""",
"""SpeechT5HifiGanConfig""",
],
"""feature_extraction_speecht5""": ["""SpeechT5FeatureExtractor"""],
"""processing_speecht5""": ["""SpeechT5Processor"""],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""SpeechT5Tokenizer"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""SpeechT5ForSpeechToText""",
"""SpeechT5ForSpeechToSpeech""",
"""SpeechT5ForTextToSpeech""",
"""SpeechT5Model""",
"""SpeechT5PreTrainedModel""",
"""SpeechT5HifiGan""",
]
if TYPE_CHECKING:
from .configuration_speechta import (
SPEECHT5_PRETRAINED_CONFIG_ARCHIVE_MAP,
SPEECHT5_PRETRAINED_HIFIGAN_CONFIG_ARCHIVE_MAP,
SpeechTaConfig,
SpeechTaHifiGanConfig,
)
from .feature_extraction_speechta import SpeechTaFeatureExtractor
from .processing_speechta import SpeechTaProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_speechta import SpeechTaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_speechta import (
SPEECHT5_PRETRAINED_MODEL_ARCHIVE_LIST,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaHifiGan,
SpeechTaModel,
SpeechTaPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 1 |
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""Salesforce/blip-vqa-base""": """https://huggingface.co/Salesforce/blip-vqa-base/resolve/main/config.json""",
"""Salesforce/blip-vqa-capfit-large""": (
"""https://huggingface.co/Salesforce/blip-vqa-base-capfit/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-base""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-base/resolve/main/config.json"""
),
"""Salesforce/blip-image-captioning-large""": (
"""https://huggingface.co/Salesforce/blip-image-captioning-large/resolve/main/config.json"""
),
"""Salesforce/blip-itm-base-coco""": """https://huggingface.co/Salesforce/blip-itm-base-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-large-coco""": """https://huggingface.co/Salesforce/blip-itm-large-coco/resolve/main/config.json""",
"""Salesforce/blip-itm-base-flikr""": """https://huggingface.co/Salesforce/blip-itm-base-flikr/resolve/main/config.json""",
"""Salesforce/blip-itm-large-flikr""": (
"""https://huggingface.co/Salesforce/blip-itm-large-flikr/resolve/main/config.json"""
),
}
class A__ ( _lowerCamelCase):
A_ : Any = 'blip_text_model'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_24 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=1_02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , sep_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Optional[int] = vocab_size
__lowerCAmelCase : List[str] = hidden_size
__lowerCAmelCase : List[str] = encoder_hidden_size
__lowerCAmelCase : Tuple = intermediate_size
__lowerCAmelCase : Optional[int] = projection_dim
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = num_hidden_layers
__lowerCAmelCase : str = num_attention_heads
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : str = layer_norm_eps
__lowerCAmelCase : Optional[int] = hidden_act
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Union[str, Any] = is_decoder
__lowerCAmelCase : Optional[Any] = use_cache
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the text config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__lowerCAmelCase : Any = config_dict['text_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'blip_vision_model'
def __init__( self , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=3_84 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1E-10 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : str = projection_dim
__lowerCAmelCase : Optional[int] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Optional[Any] = patch_size
__lowerCAmelCase : Union[str, Any] = image_size
__lowerCAmelCase : Union[str, Any] = initializer_range
__lowerCAmelCase : List[str] = attention_dropout
__lowerCAmelCase : List[Any] = layer_norm_eps
__lowerCAmelCase : Dict = hidden_act
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
cls._set_token_in_kwargs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = cls.get_config_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
# get the vision config dict if we are loading from BlipConfig
if config_dict.get('model_type' ) == "blip":
__lowerCAmelCase : str = config_dict['vision_config']
if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
f"{cls.model_type}. This is not supported for all configurations of models and can yield errors." )
return cls.from_dict(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'blip'
A_ : int = True
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2.6592 , _SCREAMING_SNAKE_CASE=2_56 , **_SCREAMING_SNAKE_CASE , ):
super().__init__(**_SCREAMING_SNAKE_CASE )
if text_config is None:
__lowerCAmelCase : Tuple = {}
logger.info('`text_config` is `None`. Initializing the `BlipTextConfig` with default values.' )
if vision_config is None:
__lowerCAmelCase : List[str] = {}
logger.info('`vision_config` is `None`. Initializing the `BlipVisionConfig` with default values.' )
__lowerCAmelCase : List[str] = BlipTextConfig(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = BlipVisionConfig(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.vision_config.hidden_size
__lowerCAmelCase : List[str] = projection_dim
__lowerCAmelCase : Union[str, Any] = logit_scale_init_value
__lowerCAmelCase : Tuple = 1.0
__lowerCAmelCase : Optional[int] = 0.02
__lowerCAmelCase : int = image_text_hidden_size
@classmethod
def __lowerCamelCase ( cls , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = copy.deepcopy(self.__dict__ )
__lowerCAmelCase : List[str] = self.text_config.to_dict()
__lowerCAmelCase : List[str] = self.vision_config.to_dict()
__lowerCAmelCase : Dict = self.__class__.model_type
return output | 86 |
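# Instantiation sketch (assumes the three classes above keep their upstream names
# BlipTextConfig, BlipVisionConfig and BlipConfig):
#
#     config = BlipConfig()
#     assert config.text_config.hidden_size == 768
#     assert config.vision_config.image_size == 384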
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
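# A single pixel_values input whose batch, channel, and spatial axes are all dynamic.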
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
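# Absolute tolerance used when validating the exported ONNX model.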
return 2E-3 | 86 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
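# A tiny class-conditioned transformer keeps this smoke test fast on CPU.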
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
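# A seeded generator plus only 2 inference steps keep the dummy run deterministic and quick.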
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
"""simple docstring"""
# Author: OMKAR PATHAK, Nwachukwu Chidiebere
# Use a Python dictionary to construct the graph.
from __future__ import annotations
from pprint import pformat
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""T""")
class A__ ( Generic[T]):
def __init__( self , _SCREAMING_SNAKE_CASE = True ):
__lowerCAmelCase : dict[T, list[T]] = {} # dictionary of lists
__lowerCAmelCase : Optional[int] = directed
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
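# Insert an edge between the two vertices, creating any missing vertex entries
# on the fly; returns self so calls can be chained.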
if not self.directed: # For undirected graphs
# if source vertex and destination vertex are both present in the
# adjacency list, add destination vertex to source vertex list of adjacent
# vertices and add source vertex to destination vertex list of adjacent
# vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
self.adj_list[destination_vertex].append(_SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination vertex
# to source vertex list of adjacent vertices, then create a new vertex with
# destination vertex as key and assign a list containing the source vertex
# as its first adjacent vertex.
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = [source_vertex]
# if only destination vertex is present in adjacency list, add source vertex
# to destination vertex list of adjacent vertices, then create a new vertex
# with source vertex as key and assign a list containing the destination
# vertex as its first adjacent vertex.
elif destination_vertex in self.adj_list:
self.adj_list[destination_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and assign a list
# containing the destination vertex as its first adjacent vertex; also
# create a new vertex with destination vertex as key and assign a list
# containing the source vertex as its first adjacent vertex.
else:
__lowerCAmelCase : str = [destination_vertex]
__lowerCAmelCase : Optional[int] = [source_vertex]
else: # For directed graphs
# if both source vertex and destination vertex are present in adjacency
# list, add destination vertex to source vertex list of adjacent vertices.
if source_vertex in self.adj_list and destination_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
# if only source vertex is present in adjacency list, add destination
# vertex to source vertex list of adjacent vertices and create a new vertex
# with destination vertex as key, which has no adjacent vertex
elif source_vertex in self.adj_list:
self.adj_list[source_vertex].append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = []
# if only destination vertex is present in adjacency list, create a new
# vertex with source vertex as key and assign a list containing destination
# vertex as first adjacent vertex
elif destination_vertex in self.adj_list:
__lowerCAmelCase : Any = [destination_vertex]
# if both source vertex and destination vertex are not present in adjacency
# list, create a new vertex with source vertex as key and a list containing
# destination vertex as its first adjacent vertex. Then create a new vertex
# with destination vertex as key, which has no adjacent vertex
else:
__lowerCAmelCase : str = [destination_vertex]
__lowerCAmelCase : int = []
return self
def __repr__( self ):
return pformat(self.adj_list ) | 86 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
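# Heun scheduler with an exponential beta schedule, 1024 training timesteps, and sample prediction.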
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
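# A random 1x3x64x64 image tensor stands in for the conditioning image.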
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to time out, so only smaller batches are tested
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import requests
from bs4 import BeautifulSoup
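# Scrape the worldometers page: headings and panel titles become the keys,
# the matching counter and number-table divs become the values.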
def __lowerCAmelCase (_UpperCamelCase = "https://www.worldometers.info/coronavirus" ):
__lowerCAmelCase : Optional[int] = BeautifulSoup(requests.get(_UpperCamelCase ).text , 'html.parser' )
__lowerCAmelCase : str = soup.findAll('h1' )
__lowerCAmelCase : str = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(_UpperCamelCase , _UpperCamelCase )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in __lowerCAmelCase().items():
print(f'{key}\n{value}\n') | 86 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
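# Fall back to dummy placeholder objects when torch or transformers are unavailable.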
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 86 |
"""simple docstring"""
import math
import sys
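# Bottom-up DP for the minimum number of perfect squares that sum to n:
# answers[i] = 1 + min(answers[i - j*j]) over all j with j*j <= i.
# Example: 12 -> 3, since 12 = 4 + 4 + 4.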
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """spiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
},
"""tokenizer_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
lowerCamelCase__ = """▁"""
class A__ ( _lowerCamelCase):
A_ : Union[str, Any] = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = AlbertTokenizer
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="[SEP]" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="[CLS]" , _SCREAMING_SNAKE_CASE="[MASK]" , **_SCREAMING_SNAKE_CASE , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text, so there should be a match in a non-normalized sentence.
__lowerCAmelCase : List[Any] = (
AddedToken(_SCREAMING_SNAKE_CASE , lstrip=_SCREAMING_SNAKE_CASE , rstrip=_SCREAMING_SNAKE_CASE , normalized=_SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else mask_token
)
super().__init__(
_SCREAMING_SNAKE_CASE , tokenizer_file=_SCREAMING_SNAKE_CASE , do_lower_case=_SCREAMING_SNAKE_CASE , remove_space=_SCREAMING_SNAKE_CASE , keep_accents=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , sep_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , cls_token=_SCREAMING_SNAKE_CASE , mask_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = do_lower_case
__lowerCAmelCase : List[Any] = remove_space
__lowerCAmelCase : List[Any] = keep_accents
__lowerCAmelCase : int = vocab_file
__lowerCAmelCase : Dict = False if not self.vocab_file else True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
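# ALBERT sequence format: [CLS] A [SEP] for a single sequence, [CLS] A [SEP] B [SEP] for a pair.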
__lowerCAmelCase : Dict = [self.sep_token_id]
__lowerCAmelCase : Dict = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
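# Token type ids are 0 for the first sequence and its special tokens, 1 for the second.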
__lowerCAmelCase : Optional[int] = [self.sep_token_id]
__lowerCAmelCase : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase : str = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_SCREAMING_SNAKE_CASE ):
copyfile(self.vocab_file , _SCREAMING_SNAKE_CASE )
return (out_vocab_file,) | 86 |
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
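# Decode incrementally with a cache (all tokens but the last, then the last token)
# and compare against a single uncached full forward pass.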
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, List, Mapping, Optional
from packaging import version
if TYPE_CHECKING:
from ... import PreTrainedTokenizer, TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast, PatchingSpec
from ...utils import is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""bigscience/bloom""": """https://huggingface.co/bigscience/bloom/resolve/main/config.json""",
"""bigscience/bloom-560m""": """https://huggingface.co/bigscience/bloom-560m/blob/main/config.json""",
"""bigscience/bloom-1b1""": """https://huggingface.co/bigscience/bloom-1b1/blob/main/config.json""",
"""bigscience/bloom-1b7""": """https://huggingface.co/bigscience/bloom-1b7/blob/main/config.json""",
"""bigscience/bloom-3b""": """https://huggingface.co/bigscience/bloom-3b/blob/main/config.json""",
"""bigscience/bloom-7b1""": """https://huggingface.co/bigscience/bloom-7b1/blob/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : int = 'bloom'
A_ : Tuple = ['past_key_values']
A_ : Union[str, Any] = {
'num_hidden_layers': 'n_layer',
'num_attention_heads': 'n_head',
}
def __init__( self , _SCREAMING_SNAKE_CASE=25_08_80 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=False , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Any = vocab_size
# Backward compatibility with n_embed kwarg
__lowerCAmelCase : List[str] = kwargs.pop('n_embed' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = hidden_size if n_embed is None else n_embed
__lowerCAmelCase : Optional[int] = n_layer
__lowerCAmelCase : List[Any] = n_head
__lowerCAmelCase : Dict = layer_norm_epsilon
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Optional[Any] = use_cache
__lowerCAmelCase : Any = pretraining_tp
__lowerCAmelCase : Any = apply_residual_connection_post_layernorm
__lowerCAmelCase : int = hidden_dropout
__lowerCAmelCase : Any = attention_dropout
__lowerCAmelCase : List[Any] = bos_token_id
__lowerCAmelCase : Dict = eos_token_id
__lowerCAmelCase : str = slow_but_exact
super().__init__(bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : Union[str, Any] = version.parse('1.12')
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = "default" , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False , ):
super().__init__(_SCREAMING_SNAKE_CASE , task=_SCREAMING_SNAKE_CASE , patching_specs=_SCREAMING_SNAKE_CASE , use_past=_SCREAMING_SNAKE_CASE )
if not getattr(self._config , 'pad_token_id' , _SCREAMING_SNAKE_CASE ):
# TODO: how to do that better?
__lowerCAmelCase : Optional[Any] = 0
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = OrderedDict({'input_ids': {0: 'batch', 1: 'sequence'}} )
if self.use_past:
# BLOOM stores values on dynamic axis 2. For more details see: https://github.com/huggingface/transformers/pull/18344
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='inputs' , inverted_values_shape=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'past_sequence + sequence'}
else:
__lowerCAmelCase : int = {0: 'batch', 1: 'sequence'}
return common_inputs
@property
def __lowerCamelCase ( self ):
return self._config.n_layer
@property
def __lowerCamelCase ( self ):
return self._config.n_head
@property
def __lowerCamelCase ( self ):
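# Absolute tolerance used when validating the exported ONNX model.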
return 1E-3
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : Tuple = super(_SCREAMING_SNAKE_CASE , self ).generate_dummy_inputs(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
# We need to order the inputs in the way they appear in the forward() signature
__lowerCAmelCase : List[str] = OrderedDict({'input_ids': common_inputs['input_ids']} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : str = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowerCAmelCase : Tuple = seqlen + 2
__lowerCAmelCase : int = self._config.hidden_size // self.num_attention_heads
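# BLOOM fuses the batch and head dims: keys are (batch*heads, head_dim, past_len),
# values are (batch*heads, past_len, head_dim).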
__lowerCAmelCase : Optional[Any] = (
batch * self.num_attention_heads,
head_dim,
past_key_values_length,
)
__lowerCAmelCase : Union[str, Any] = (
batch * self.num_attention_heads,
past_key_values_length,
head_dim,
)
__lowerCAmelCase : Optional[int] = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(self.num_layers )
]
__lowerCAmelCase : List[str] = common_inputs['attention_mask']
if self.use_past:
__lowerCAmelCase : Optional[int] = ordered_inputs['attention_mask'].dtype
__lowerCAmelCase : List[Any] = torch.cat(
[ordered_inputs['attention_mask'], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
return ordered_inputs
@property
def __lowerCamelCase ( self ):
return 13 | 86 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
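# Apart from parent, the constructor arguments are ignored; the tester hardcodes small fixed sizes below.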
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()  # tuple targets cannot be annotated; names mirror the sibling test below
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
outputs = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
    output_hidden_states = outputs['encoder_hidden_states']
    output_attentions = outputs['encoder_attentions']
else:
    output_hidden_states = outputs['hidden_states']
    output_attentions = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
config , inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
def check_decoder_attentions_output(outputs ):
    out_len = len(outputs )
    self.assertEqual(out_len % 2 , 0 )
    decoder_attentions = outputs.decoder_attentions
    self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
    self.assertListEqual(
        list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(outputs ):
    attentions = [
        t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
    ]
    self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
    self.assertListEqual(
        list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads // 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
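# --- Illustrative sketch (added for clarity; not part of the test file above) ---
# The assertions above compare attention shapes against num_attention_heads / 2.
# Assuming ConvBERT's default head_ratio of 2 (an assumption, not stated in the
# tests), half of the configured heads are replaced by span-based dynamic
# convolution, so the attention tensors expose only num_attention_heads // 2 heads.
num_attention_heads = 4
head_ratio = 2  # assumed default head_ratio
effective_attention_heads = num_attention_heads // head_ratio
# Python treats 2 == 2.0 as equal, which is why float division in the assertions
# still passes against integer tensor shapes; // keeps the expected value an int.
assert effective_attention_heads == num_attention_heads / 2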
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Any = RoCBertTokenizer
A_ : List[Any] = None
A_ : List[str] = False
A_ : Optional[Any] = True
A_ : Tuple = filter_non_english
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : Tuple = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', '你', '好', '是', '谁', 'a', 'b', 'c', 'd']
__lowerCAmelCase : Dict = {}
__lowerCAmelCase : Optional[int] = {}
for i, value in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = i
__lowerCAmelCase : Any = i
__lowerCAmelCase : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_shape_file'] )
__lowerCAmelCase : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['word_pronunciation_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
with open(self.word_shape_file , 'w' , encoding='utf-8' ) as word_shape_writer:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE )
with open(self.word_pronunciation_file , 'w' , encoding='utf-8' ) as word_pronunciation_writer:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase : Optional[Any] = tokenizer.tokenize('你好[SEP]你是谁' )
self.assertListEqual(_SCREAMING_SNAKE_CASE , ['你', '好', '[SEP]', '你', '是', '谁'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(_SCREAMING_SNAKE_CASE ) , [5, 6, 2, 5, 7, 8] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = RoCBertBasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) , ['ah', '\u535A', '\u63A8', 'zz'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['h\u00E9llo'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) , ['hello'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , strip_accents=_SCREAMING_SNAKE_CASE )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) , ['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = RoCBertBasicTokenizer(do_lower_case=_SCREAMING_SNAKE_CASE , never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) , ['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
__lowerCAmelCase : str = {}
for i, token in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = i
__lowerCAmelCase : Optional[int] = RoCBertWordpieceTokenizer(vocab=_SCREAMING_SNAKE_CASE , unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) , [] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) , ['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) , ['[UNK]', 'runn', '##ing'] )
def __lowerCamelCase ( self ):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def __lowerCamelCase ( self ):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def __lowerCamelCase ( self ):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
if self.test_rust_tokenizer:
__lowerCAmelCase : str = self.get_rust_tokenizer()
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] , [['[UNK]'], [], ['[UNK]']] )
def __lowerCamelCase ( self ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase : Any = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = f"A, naïve {tokenizer_r.mask_token} AllenNLP sentence."
__lowerCAmelCase : Optional[Any] = tokenizer_r.encode_plus(
_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE , return_offsets_mapping=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = tokenizer_r.do_lower_case if hasattr(_SCREAMING_SNAKE_CASE , 'do_lower_case' ) else False
__lowerCAmelCase : Any = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] , tokens['offset_mapping'] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = ['的', '人', '有']
__lowerCAmelCase : List[Any] = ''.join(_SCREAMING_SNAKE_CASE )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"{tokenizer.__class__.__name__} ({pretrained_name})" ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[Any] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = False
__lowerCAmelCase : int = self.rust_tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = self.tokenizer_class.from_pretrained(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tokenizer_r.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer_p.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tokenizer_r.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = tokenizer_p.convert_ids_to_tokens(_SCREAMING_SNAKE_CASE )
# it is expected that only the first Chinese character is not preceded by "##".
__lowerCAmelCase : Tuple = [
f"##{token}" if idx != 0 else token for idx, token in enumerate(_SCREAMING_SNAKE_CASE )
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.tokenizer_class(self.vocab_file , self.word_shape_file , self.word_pronunciation_file )
__lowerCAmelCase : List[Any] = tokenizer.encode('你好' , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer.encode('你是谁' , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
assert encoded_sentence == [1] + text + [2]
assert encoded_pair == [1] + text + [2] + text_a + [2]
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.get_tokenizers(do_lower_case=_SCREAMING_SNAKE_CASE )
for tokenizer in tokenizers:
with self.subTest(f"{tokenizer.__class__.__name__}" ):
__lowerCAmelCase : Union[str, Any] = '你好,你是谁'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.convert_tokens_to_shape_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tokenizer.convert_tokens_to_pronunciation_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = tokenizer.prepare_for_model(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tokenizer.encode_plus(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
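# --- Illustrative sketch (added for clarity; not part of the test file above) ---
# A minimal greedy longest-match WordPiece tokenizer mirroring the behaviour the
# RoCBertWordpieceTokenizer test asserts: continuation pieces carry a "##" prefix,
# and a word that cannot be fully covered maps to the unknown token.
def wordpiece_tokenize(word , vocab , unk_token='[UNK]' ):
    pieces , start = [] , 0
    while start < len(word ):
        end = len(word )
        current = None
        while start < end:  # try the longest remaining substring first, then shrink
            piece = word[start:end]
            if start > 0:
                piece = '##' + piece
            if piece in vocab:
                current = piece
                break
            end -= 1
        if current is None:
            return [unk_token]  # no piece matched: the whole word is unknown
        pieces.append(current )
        start = end
    return pieces
assert wordpiece_tokenize('unwanted' , {'un', '##want', '##ed'} ) == ['un', '##want', '##ed']
assert wordpiece_tokenize('unwantedX' , {'un', '##want', '##ed'} ) == ['[UNK]']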
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def get_expected_values( self , image_inputs , batched=False ):
    # Method and parameter names reconstructed from the call sites, which invoke
    # self.get_expected_values([image] ) and get_expected_values(... , batched=... ).
    if not batched:
        image = image_inputs[0]
        if isinstance(image , Image.Image ):
            w , h = image.size
        else:
            h , w = image.shape[1], image.shape[2]
        if w < h:
            expected_height = int(self.size['shortest_edge'] * h / w )
            expected_width = self.size['shortest_edge']
        elif w > h:
            expected_height = self.size['shortest_edge']
            expected_width = int(self.size['shortest_edge'] * w / h )
        else:
            expected_height = self.size['shortest_edge']
            expected_width = self.size['shortest_edge']
    else:
        expected_values = []
        for image in image_inputs:
            expected_height , expected_width = self.get_expected_values([image] )
            expected_values.append((expected_height, expected_width) )
        expected_height = max(expected_values , key=lambda item : item[0] )[0]
        expected_width = max(expected_values , key=lambda item : item[1] )[1]
    return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )  # annotated tuple targets are a SyntaxError; names taken from the assertions below
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
expected_height , expected_width = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
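# --- Illustrative sketch (added for clarity; not part of the test file above) ---
# The expected-shape arithmetic above scales the shorter image side to
# size['shortest_edge'] and the other side proportionally. A standalone version
# (the longest_edge cap applied by the real processor is omitted here):
def shortest_edge_resize(height , width , shortest_edge=18 ):
    if width < height:
        return int(shortest_edge * height / width ), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height )
    return shortest_edge, shortest_edge
assert shortest_edge_resize(4_00 , 2_00 ) == (36, 18)
assert shortest_edge_resize(2_00 , 4_00 ) == (18, 36)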
"""simple docstring"""
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = True
A_ : bool = False
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Any = []
for i in range(self.num_layers ):
__lowerCAmelCase : Tuple = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : Any = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = resnets
__lowerCAmelCase : Optional[int] = attentions
if self.add_downsample:
__lowerCAmelCase : str = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Tuple = ()
for resnet, attn in zip(self.resnets , self.attentions ):
__lowerCAmelCase : Tuple = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Tuple = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : bool = True
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = []
for i in range(self.num_layers ):
__lowerCAmelCase : Union[str, Any] = self.in_channels if i == 0 else self.out_channels
__lowerCAmelCase : str = FlaxResnetBlockaD(
in_channels=_SCREAMING_SNAKE_CASE , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = resnets
if self.add_downsample:
__lowerCAmelCase : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : List[str] = ()
for resnet in self.resnets:
__lowerCAmelCase : List[Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
if self.add_downsample:
__lowerCAmelCase : Union[str, Any] = self.downsamplers_a(_SCREAMING_SNAKE_CASE )
output_states += (hidden_states,)
return hidden_states, output_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = True
A_ : bool = False
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Union[str, Any] = []
for i in range(self.num_layers ):
__lowerCAmelCase : Optional[Any] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase : Optional[int] = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase : List[str] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = resnets
__lowerCAmelCase : Optional[Any] = attentions
if self.add_upsample:
__lowerCAmelCase : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
__lowerCAmelCase : Union[str, Any] = res_hidden_states_tuple[-1]
__lowerCAmelCase : List[str] = res_hidden_states_tuple[:-1]
__lowerCAmelCase : str = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCAmelCase : Union[str, Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase : Optional[Any] = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module):
A_ : int
A_ : int
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : bool = True
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = []
for i in range(self.num_layers ):
__lowerCAmelCase : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
__lowerCAmelCase : List[Any] = self.prev_output_channel if i == 0 else self.out_channels
__lowerCAmelCase : Union[str, Any] = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = resnets
if self.add_upsample:
__lowerCAmelCase : Any = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
for resnet in self.resnets:
# pop res hidden states
__lowerCAmelCase : Any = res_hidden_states_tuple[-1]
__lowerCAmelCase : List[Any] = res_hidden_states_tuple[:-1]
__lowerCAmelCase : Optional[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
__lowerCAmelCase : List[Any] = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
if self.add_upsample:
__lowerCAmelCase : Dict = self.upsamplers_a(_SCREAMING_SNAKE_CASE )
return hidden_states
class A__ ( nn.Module):
A_ : int
A_ : float = 0.0
A_ : int = 1
A_ : int = 1
A_ : bool = False
A_ : bool = False
A_ : jnp.dtype = jnp.floataa
def __lowerCamelCase ( self ):
# there is always at least one resnet
__lowerCAmelCase : str = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
__lowerCAmelCase : List[Any] = []
for _ in range(self.num_layers ):
__lowerCAmelCase : List[str] = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = resnets
__lowerCAmelCase : Union[str, Any] = attentions
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : int = self.resnets[0](_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
__lowerCAmelCase : int = attn(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = resnet(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , deterministic=_SCREAMING_SNAKE_CASE )
return hidden_states | 86 |
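# --- Illustrative sketch (added for clarity; not part of the module file above) ---
# The up blocks pop residual states saved by the down blocks and concatenate them
# on the channel axis before each resnet, which is why the resnets are built with
# in_channels = resnet_in_channels + res_skip_channels. A shape-only demonstration:
import jax.numpy as jnp
hidden_states = jnp.zeros((1, 8, 8, 320) )      # (batch, height, width, channels)
res_hidden_states = jnp.zeros((1, 8, 8, 640) )  # skip state saved on the way down
merged = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
assert merged.shape == (1, 8, 8, 320 + 640)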
"""simple docstring"""
import numpy as np
# Function and parameter names reconstructed from the bodies, which reference
# "vector" and "sigmoid" directly.
def sigmoid(vector ):
    """Element-wise logistic sigmoid: 1 / (1 + exp(-x))."""
    return 1 / (1 + np.exp(-vector ))
def sigmoid_linear_unit(vector ):
    """Swish / SiLU activation: x * sigmoid(x)."""
    return vector * sigmoid(vector )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
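# --- Illustrative usage (added for clarity; relies on the reconstructed names above) ---
# sigmoid(0) is 0.5 and swish(0) is 0; for large positive x, swish(x) approaches x.
print(sigmoid(np.array([-1.0, 0.0, 1.0]) ))              # approximately [0.269, 0.5, 0.731]
print(sigmoid_linear_unit(np.array([-1.0, 0.0, 1.0]) ))  # approximately [-0.269, 0.0, 0.731]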
"""simple docstring"""
from math import factorial, radians
# Names reconstructed from the function body; the defaults (18 series terms,
# 10 rounded decimal places) are kept from the original signature.
def maclaurin_sin(angle_in_degrees , accuracy = 18 , rounded_values_count = 10 ):
    """Approximate sin of an angle given in degrees via the Maclaurin series:
    sin(x) = x - x^3/3! + x^5/5! - x^7/7! + ...
    """
    # Wrap the angle into [0, 360) so the series converges quickly
    angle_in_degrees = angle_in_degrees - ((angle_in_degrees // 360.0) * 360.0)
    # Converting from degrees to radians
    angle_in_radians = radians(angle_in_degrees )
    result = angle_in_radians
    a = 3
    b = -1
    for _ in range(accuracy ):
        result += (b * (angle_in_radians**a)) / factorial(a )
        b = -b  # One positive term and the next will be negative and so on...
        a += 2  # Increased by 2 for every term.
    return round(result , rounded_values_count )
if __name__ == "__main__":
__import__("""doctest""").testmod() | 86 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
config_and_inputs = self.prepare_config_and_inputs()
# Names follow the return order of prepare_config_and_inputs above; a tuple
# target cannot carry a type annotation.
(
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
) = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
        # MobileBERT activations span many orders of magnitude (roughly 1e0 to 1e8). Even a 0.0000001% relative
        # difference on a value near 1e8 yields an absolute difference of ~1, so an absolute tolerance is a poor
        # fit here. Instead, we divide the expected result by the actual result, which should be ~1, and check
        # that the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
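The ratio-based bounds check above generalizes to any output whose values span several orders of magnitude. A minimal self-contained sketch of the same relative-tolerance idea (the tensor values are illustrative, not real MobileBERT outputs):

import torch

TOLERANCE = 1E-3

def within_relative_tolerance(expected, actual, tol=TOLERANCE):
    # Divide instead of subtract: a ratio near 1 means agreement regardless of magnitude.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))

expected = torch.tensor([1.0E8, 2.5, -3.0E-2])
actual = expected * 1.0000005  # within 0.1% everywhere, despite the huge absolute spread
assert within_relative_tolerance(expected, actual)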
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_torch_available,
is_vision_available,
)
lowerCamelCase__ = {"""configuration_beit""": ["""BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """BeitConfig""", """BeitOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""BeitFeatureExtractor"""]
lowerCamelCase__ = ["""BeitImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""BEIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""BeitForImageClassification""",
"""BeitForMaskedImageModeling""",
"""BeitForSemanticSegmentation""",
"""BeitModel""",
"""BeitPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FlaxBeitForImageClassification""",
"""FlaxBeitForMaskedImageModeling""",
"""FlaxBeitModel""",
"""FlaxBeitPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_beit import BEIT_PRETRAINED_CONFIG_ARCHIVE_MAP, BeitConfig, BeitOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_beit import BeitFeatureExtractor
from .image_processing_beit import BeitImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_beit import (
BEIT_PRETRAINED_MODEL_ARCHIVE_LIST,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
BeitPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_beit import (
FlaxBeitForImageClassification,
FlaxBeitForMaskedImageModeling,
FlaxBeitModel,
FlaxBeitPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
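The _LazyModule wiring above defers the heavy torch/flax imports until an attribute is first touched. A simplified stand-in illustrating the pattern (a sketch of the idea, not the actual _LazyModule implementation):

import importlib
import types

class SimpleLazyModule(types.ModuleType):
    # Minimal sketch: defer importing submodules until an attribute is first accessed.
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._attr_to_module = {attr: mod for mod, attrs in import_structure.items() for attr in attrs}

    def __getattr__(self, attr):
        module_name = self._attr_to_module.get(attr)
        if module_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(module_name), attr)
        setattr(self, attr, value)  # cache so later lookups bypass __getattr__
        return value

lazy = SimpleLazyModule("demo", {"json": ["dumps"]})
assert lazy.dumps({"a": 1}) == '{"a": 1}'  # json is imported only here, on first access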
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import DDIMScheduler, KandinskyVaaPipeline, KandinskyVaaPriorPipeline, UNetaDConditionModel, VQModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Optional[int] = KandinskyVaaPipeline
A_ : Union[str, Any] = [
'image_embeds',
'negative_image_embeds',
]
A_ : Any = ['image_embeds', 'negative_image_embeds']
A_ : Optional[Any] = [
'generator',
'height',
'width',
'latents',
'guidance_scale',
'num_inference_steps',
'return_dict',
'guidance_scale',
'num_images_per_prompt',
'output_type',
'return_dict',
]
A_ : Optional[Any] = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 1_00
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'in_channels': 4,
            # Out channels is double the in channels because the model predicts both mean and variance
'out_channels': 8,
'addition_embed_type': 'image',
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'encoder_hid_dim': self.text_embedder_hidden_size,
'encoder_hid_dim_type': 'image_proj',
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': None,
}
__lowerCAmelCase : str = UNetaDConditionModel(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
return {
"block_out_channels": [32, 64],
"down_block_types": ["DownEncoderBlock2D", "AttnDownEncoderBlock2D"],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": [
"AttnUpDecoderBlock2D",
"UpDecoderBlock2D",
],
"vq_embed_dim": 4,
}
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[int] = VQModel(**self.dummy_movq_kwargs )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.dummy_unet
__lowerCAmelCase : Tuple = self.dummy_movq
__lowerCAmelCase : Optional[Any] = DDIMScheduler(
num_train_timesteps=10_00 , beta_schedule='linear' , beta_start=0.0_0085 , beta_end=0.012 , clip_sample=_SCREAMING_SNAKE_CASE , set_alpha_to_one=_SCREAMING_SNAKE_CASE , steps_offset=1 , prediction_type='epsilon' , thresholding=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : List[str] = {
'unet': unet,
'scheduler': scheduler,
'movq': movq,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Dict = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = floats_tensor((1, self.text_embedder_hidden_size) , rng=random.Random(seed + 1 ) ).to(
_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : str = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : Dict = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'image_embeds': image_embeds,
'negative_image_embeds': negative_image_embeds,
'generator': generator,
'height': 64,
'width': 64,
'guidance_scale': 4.0,
'num_inference_steps': 2,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = 'cpu'
__lowerCAmelCase : int = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : List[str] = output.images
__lowerCAmelCase : Any = pipe(
**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) , return_dict=_SCREAMING_SNAKE_CASE , )[0]
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
__lowerCAmelCase : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
__lowerCAmelCase : List[Any] = np.array(
[0.623_7976, 1.0, 0.3644_1332, 1.0, 0.7063_9634, 0.2987_7186, 0.8565_2125, 0.521_6843, 0.5445_4046] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_slice.flatten()}"
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
), f" expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}"
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/kandinskyv22/kandinskyv22_text2img_cat_fp16.npy' )
__lowerCAmelCase : Dict = KandinskyVaaPriorPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-prior' , torch_dtype=torch.floataa )
pipe_prior.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = KandinskyVaaPipeline.from_pretrained(
'kandinsky-community/kandinsky-2-2-decoder' , torch_dtype=torch.floataa )
__lowerCAmelCase : Union[str, Any] = pipeline.to(_SCREAMING_SNAKE_CASE )
pipeline.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = 'red cat, 4k photo'
__lowerCAmelCase : Union[str, Any] = torch.Generator(device='cuda' ).manual_seed(0 )
__lowerCAmelCase , __lowerCAmelCase : Tuple = pipe_prior(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=5 , negative_prompt='' , ).to_tuple()
__lowerCAmelCase : Optional[int] = torch.Generator(device='cuda' ).manual_seed(0 )
__lowerCAmelCase : str = pipeline(
image_embeds=_SCREAMING_SNAKE_CASE , negative_image_embeds=_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=1_00 , output_type='np' , )
__lowerCAmelCase : Dict = output.images[0]
assert image.shape == (5_12, 5_12, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
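The loop above tracks only the numerators of the convergents of e (Project Euler 65). A minimal cross-check sketch with fractions.Fraction, which evaluates the continued fraction directly; the 10th convergent is the worked example from the problem statement:

from fractions import Fraction

def e_convergent(n):
    # Continued fraction of e: [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], where every third partial
    # quotient after the leading 2 is the even number 2m and the rest are 1.
    terms = [2] + [2 * (i // 3 + 1) if i % 3 == 1 else 1 for i in range(n - 1)]
    value = Fraction(terms[-1])
    for a in reversed(terms[:-1]):
        value = a + 1 / value
    return value

conv = e_convergent(10)
assert conv == Fraction(1457, 536)                     # 10th convergent: 1457/536
assert sum(int(d) for d in str(conv.numerator)) == 17  # digit sum 1 + 4 + 5 + 7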
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
while number > 0:
__lowerCAmelCase : Tuple = number % 10
sum_of_digits += last_digit
__lowerCAmelCase : int = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Dict = factorial(_UpperCamelCase )
__lowerCAmelCase : int = split_and_add(_UpperCamelCase )
return result
if __name__ == "__main__":
print(solution(int(input("""Enter the Number: """).strip()))) | 86 |
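A quick sanity check of the same computation using the standard library (648 is the well-known digit sum of 100!):

from math import factorial

assert sum(int(d) for d in str(factorial(10))) == 27    # 10! = 3628800 -> 3+6+2+8+8+0+0
assert sum(int(d) for d in str(factorial(100))) == 648  # should match solution(100) above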
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
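A hedged usage sketch for the config above: instantiating it with a couple of overridden XPath-related fields (the override values are arbitrary, and the transformers package is assumed installed):

from transformers import MarkupLMConfig

config = MarkupLMConfig(max_depth=64, xpath_unit_hidden_size=32)
print(config.max_depth, config.tag_pad_id)  # 64 216, given the defaults shown above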
"""simple docstring"""
# fmt: off
lowerCamelCase__ = {
"""A""": """.-""", """B""": """-...""", """C""": """-.-.""", """D""": """-..""", """E""": """.""", """F""": """..-.""", """G""": """--.""",
"""H""": """....""", """I""": """..""", """J""": """.---""", """K""": """-.-""", """L""": """.-..""", """M""": """--""", """N""": """-.""",
"""O""": """---""", """P""": """.--.""", """Q""": """--.-""", """R""": """.-.""", """S""": """...""", """T""": """-""", """U""": """..-""",
"""V""": """...-""", """W""": """.--""", """X""": """-..-""", """Y""": """-.--""", """Z""": """--..""", """1""": """.----""",
"""2""": """..---""", """3""": """...--""", """4""": """....-""", """5""": """.....""", """6""": """-....""", """7""": """--...""",
"""8""": """---..""", """9""": """----.""", """0""": """-----""", """&""": """.-...""", """@""": """.--.-.""",
""":""": """---...""", """,""": """--..--""", """.""": """.-.-.-""", """'""": """.----.""", """\"""": """.-..-.""",
"""?""": """..--..""", """/""": """-..-.""", """=""": """-...-""", """+""": """.-.-.""", """-""": """-....-""",
"""(""": """-.--.""", """)""": """-.--.-""", """!""": """-.-.--""", """ """: """/"""
} # Exclamation mark is not in ITU-R recommendation
# fmt: on
lowerCamelCase__ = {value: key for key, value in MORSE_CODE_DICT.items()}
def __lowerCAmelCase (_UpperCamelCase ):
return " ".join(MORSE_CODE_DICT[char] for char in message.upper() )
def __lowerCAmelCase (_UpperCamelCase ):
return "".join(REVERSE_DICT[char] for char in message.split() )
def __lowerCAmelCase ():
__lowerCAmelCase : Optional[Any] = 'Morse code here!'
print(_UpperCamelCase )
__lowerCAmelCase : List[str] = encrypt(_UpperCamelCase )
print(_UpperCamelCase )
__lowerCAmelCase : Optional[Any] = decrypt(_UpperCamelCase )
print(_UpperCamelCase )
if __name__ == "__main__":
main() | 86 |
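A self-contained round-trip sketch of the same encode/decode idea on a three-symbol alphabet (the full ITU table lives in the dictionary above):

MORSE = {"S": "...", "O": "---", " ": "/"}
REVERSE = {value: key for key, value in MORSE.items()}

def encode(message):
    return " ".join(MORSE[char] for char in message.upper())

def decode(code):
    return "".join(REVERSE[symbol] for symbol in code.split())

assert encode("SOS") == "... --- ..."
assert decode(encode("sos sos")) == "SOS SOS"  # decoding recovers the uppercased text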
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
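A hedged sketch of loading the converted checkpoint back for inference; the folder path is a placeholder for whatever --pytorch_dump_folder_path was, and transformers plus torch are assumed installed:

import torch
from transformers import WavLMModel

model = WavLMModel.from_pretrained("./wavlm-converted")  # folder written by the script above
model.eval()
with torch.no_grad():
    dummy_audio = torch.randn(1, 16_000)  # one second of 16 kHz audio
    hidden_states = model(dummy_audio).last_hidden_state
print(hidden_states.shape)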
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
lowerCamelCase__ = data_utils.TransfoXLTokenizer
lowerCamelCase__ = data_utils.TransfoXLCorpus
lowerCamelCase__ = data_utils
lowerCamelCase__ = data_utils
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(_UpperCamelCase , 'rb' ) as fp:
__lowerCAmelCase : Dict = pickle.load(_UpperCamelCase , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
__lowerCAmelCase : Union[str, Any] = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F"Save vocabulary to {pytorch_vocab_dump_path}" )
__lowerCAmelCase : Optional[int] = corpus.vocab.__dict__
torch.save(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Tuple = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , _UpperCamelCase )
__lowerCAmelCase : Tuple = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F"Save dataset to {pytorch_dataset_dump_path}" )
torch.save(_UpperCamelCase , _UpperCamelCase )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
__lowerCAmelCase : str = os.path.abspath(_UpperCamelCase )
__lowerCAmelCase : Any = os.path.abspath(_UpperCamelCase )
print(F"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}." )
# Initialise PyTorch model
if transfo_xl_config_file == "":
__lowerCAmelCase : List[Any] = TransfoXLConfig()
else:
__lowerCAmelCase : List[Any] = TransfoXLConfig.from_json_file(_UpperCamelCase )
print(F"Building PyTorch model from configuration: {config}" )
__lowerCAmelCase : Union[str, Any] = TransfoXLLMHeadModel(_UpperCamelCase )
__lowerCAmelCase : Tuple = load_tf_weights_in_transfo_xl(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# Save pytorch-model
__lowerCAmelCase : str = os.path.join(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Union[str, Any] = os.path.join(_UpperCamelCase , _UpperCamelCase )
print(F"Save PyTorch model to {os.path.abspath(_UpperCamelCase )}" )
torch.save(model.state_dict() , _UpperCamelCase )
print(F"Save configuration file to {os.path.abspath(_UpperCamelCase )}" )
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--pytorch_dump_folder_path""",
default=None,
type=str,
required=True,
help="""Path to the folder to store the PyTorch model or dataset/vocab.""",
)
parser.add_argument(
"""--tf_checkpoint_path""",
default="""""",
type=str,
help="""An optional path to a TensorFlow checkpoint path to be converted.""",
)
parser.add_argument(
"""--transfo_xl_config_file""",
default="""""",
type=str,
help=(
"""An optional config json file corresponding to the pre-trained BERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--transfo_xl_dataset_file""",
default="""""",
type=str,
help="""An optional dataset file to be converted in a vocabulary.""",
)
lowerCamelCase__ = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 86 |
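The script leans on the config's JSON round trip. A minimal sketch of that step in isolation (assumes a transformers version that still ships Transformer-XL):

import os
import tempfile
from transformers import TransfoXLConfig

config = TransfoXLConfig(n_layer=2, d_model=64)
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "config.json")
    with open(path, "w", encoding="utf-8") as f:
        f.write(config.to_json_string())
    reloaded = TransfoXLConfig.from_json_file(path)
assert reloaded.n_layer == 2 and reloaded.d_model == 64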
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
            return [
                {"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
                {"Name": "eval_accuracy", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
                {"Name": "eval_loss", "Regex": r"loss.*=\D*(.*?)]?$"},
            ]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
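A minimal sketch of how a class-scoped fixture like the one above is typically consumed; the test class and env contents are hypothetical stand-ins (requires pytest):

import pytest

@pytest.fixture(scope="class")
def sm_env(request):
    # Attach the environment object to the requesting test class.
    request.cls.env = {"framework": request.cls.framework, "region": "us-east-1"}

@pytest.mark.usefixtures("sm_env")
class TestExample:
    framework = "pytorch"

    def test_env_attached(self):
        assert self.env["framework"] == "pytorch"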
"""simple docstring"""
from __future__ import annotations
import time
import numpy as np
lowerCamelCase__ = [8, 5, 9, 7]
lowerCamelCase__ = [
[2, 0, 1, 1],
[0, 1, 2, 1],
[4, 0, 0, 3],
[0, 2, 1, 0],
[1, 0, 3, 0],
]
lowerCamelCase__ = [
[3, 2, 1, 4],
[0, 2, 5, 2],
[5, 1, 0, 5],
[1, 5, 3, 0],
[3, 0, 3, 3],
]
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : List[str] = claim_vector
__lowerCAmelCase : str = allocated_resources_table
__lowerCAmelCase : Optional[int] = maximum_claim_table
def __lowerCamelCase ( self ):
return [
sum(p_item[i] for p_item in self.__allocated_resources_table )
for i in range(len(self.__allocated_resources_table[0] ) )
]
def __lowerCamelCase ( self ):
return np.array(self.__claim_vector ) - np.array(
self.__processes_resource_summation() )
def __lowerCamelCase ( self ):
return [
list(np.array(self.__maximum_claim_table[i] ) - np.array(_SCREAMING_SNAKE_CASE ) )
for i, allocated_resource in enumerate(self.__allocated_resources_table )
]
def __lowerCamelCase ( self ):
return {self.__need().index(_SCREAMING_SNAKE_CASE ): i for i in self.__need()}
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.__need()
__lowerCAmelCase : int = self.__allocated_resources_table
__lowerCAmelCase : List[str] = self.__available_resources()
__lowerCAmelCase : Dict = self.__need_index_manager()
for kw, val in kwargs.items():
if kw and val is True:
self.__pretty_data()
print('_' * 50 + '\n' )
while need_list:
__lowerCAmelCase : Optional[Any] = False
for each_need in need_list:
__lowerCAmelCase : Optional[int] = True
for index, need in enumerate(_SCREAMING_SNAKE_CASE ):
if need > available_resources[index]:
__lowerCAmelCase : List[Any] = False
break
if execution:
__lowerCAmelCase : Optional[int] = True
# get the original index of the process from ind_ctrl db
for original_need_index, need_clone in need_index_manager.items():
if each_need == need_clone:
__lowerCAmelCase : Optional[int] = original_need_index
print(f"Process {process_number + 1} is executing." )
# remove the process run from stack
need_list.remove(_SCREAMING_SNAKE_CASE )
# update available/freed resources stack
__lowerCAmelCase : Optional[int] = np.array(_SCREAMING_SNAKE_CASE ) + np.array(
alloc_resources_table[process_number] )
print(
'Updated available resource stack for processes: '
+ ' '.join([str(_SCREAMING_SNAKE_CASE ) for x in available_resources] ) )
break
if safe:
print('The process is in a safe state.\n' )
else:
print('System in unsafe state. Aborting...\n' )
break
def __lowerCamelCase ( self ):
print(' ' * 9 + 'Allocated Resource Table' )
for item in self.__allocated_resources_table:
print(
f"P{self.__allocated_resources_table.index(_SCREAMING_SNAKE_CASE ) + 1}"
+ ' '.join(f"{it:>8}" for it in item )
+ '\n' )
print(' ' * 9 + 'System Resource Table' )
for item in self.__maximum_claim_table:
print(
f"P{self.__maximum_claim_table.index(_SCREAMING_SNAKE_CASE ) + 1}"
+ ' '.join(f"{it:>8}" for it in item )
+ '\n' )
print(
'Current Usage by Active Processes: '
+ ' '.join(str(_SCREAMING_SNAKE_CASE ) for x in self.__claim_vector ) )
print(
'Initial Available Resources: '
+ ' '.join(str(_SCREAMING_SNAKE_CASE ) for x in self.__available_resources() ) )
time.sleep(1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
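The safety loop in the class above can be distilled into a compact predicate. A minimal sketch of the same Banker's-algorithm check on plain lists; the tables mirror the module-level ones, and [1, 2, 2, 2] is the claim vector minus the column sums of the allocation table:

def is_safe(available, allocated, maximum):
    # need[i] = maximum claim of process i minus what it already holds
    need = [[m - a for m, a in zip(max_row, alloc_row)] for max_row, alloc_row in zip(maximum, allocated)]
    work = list(available)
    finished = [False] * len(allocated)
    progressed = True
    while progressed:
        progressed = False
        for i, row in enumerate(need):
            if not finished[i] and all(n <= w for n, w in zip(row, work)):
                work = [w + a for w, a in zip(work, allocated[i])]  # process i runs to completion and releases
                finished[i] = True
                progressed = True
    return all(finished)  # safe iff every process can eventually finish

ALLOCATED = [[2, 0, 1, 1], [0, 1, 2, 1], [4, 0, 0, 3], [0, 2, 1, 0], [1, 0, 3, 0]]
MAXIMUM = [[3, 2, 1, 4], [0, 2, 5, 2], [5, 1, 0, 5], [1, 5, 3, 0], [3, 0, 3, 3]]
assert is_safe([1, 2, 2, 2], ALLOCATED, MAXIMUM)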
"""simple docstring"""
from __future__ import annotations
lowerCamelCase__ = list[tuple[int, int]]
lowerCamelCase__ = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
lowerCamelCase__ = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = pos_x
__lowerCAmelCase : Optional[Any] = pos_y
__lowerCAmelCase : Optional[int] = (pos_y, pos_x)
__lowerCAmelCase : Union[str, Any] = goal_x
__lowerCAmelCase : Any = goal_y
__lowerCAmelCase : Optional[Any] = g_cost
__lowerCAmelCase : Any = parent
__lowerCAmelCase : Union[str, Any] = self.calculate_heuristic()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = abs(self.pos_x - self.goal_x )
__lowerCAmelCase : str = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self , _SCREAMING_SNAKE_CASE ):
return self.f_cost < other.f_cost
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = [self.start]
__lowerCAmelCase : list[Node] = []
__lowerCAmelCase : str = False
def __lowerCamelCase ( self ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
__lowerCAmelCase : Optional[int] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
__lowerCAmelCase : Union[str, Any] = True
return self.retrace_path(_SCREAMING_SNAKE_CASE )
self.closed_nodes.append(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self.get_successors(_SCREAMING_SNAKE_CASE )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
# retrieve the best current path
__lowerCAmelCase : Optional[Any] = self.open_nodes.pop(self.open_nodes.index(_SCREAMING_SNAKE_CASE ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
else:
self.open_nodes.append(_SCREAMING_SNAKE_CASE )
if not self.reached:
return [self.start.pos]
return None
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
for action in delta:
__lowerCAmelCase : Optional[int] = parent.pos_x + action[1]
__lowerCAmelCase : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_SCREAMING_SNAKE_CASE ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _SCREAMING_SNAKE_CASE , ) )
return successors
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = node
__lowerCAmelCase : Optional[int] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__lowerCAmelCase : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
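What makes this search greedy is that nodes are ordered by the Manhattan heuristic alone, with no accumulated path cost in the comparison. A tiny sketch contrasting that ordering with A*'s g + h (positions and costs are made up for illustration):

def manhattan(pos, goal):
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])

goal = (6, 6)
frontier = [((0, 3), 1), ((4, 4), 8)]  # (position, g_cost) pairs

greedy_key = lambda item: manhattan(item[0], goal)           # greedy best-first: h only
astar_key = lambda item: item[1] + manhattan(item[0], goal)  # A*: g + h

assert min(frontier, key=greedy_key)[0] == (4, 4)  # h = 4 beats h = 9
assert min(frontier, key=astar_key)[0] == (0, 3)   # f = 1 + 9 beats f = 8 + 4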
"""simple docstring"""
# coding=utf-8
# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# this script dumps information about the environment
import os
import platform
import sys
lowerCamelCase__ = """3"""
print("""Python version:""", sys.version)
print("""OS platform:""", platform.platform())
print("""OS architecture:""", platform.machine())
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
except ImportError:
print("""Torch version:""", None)
try:
import transformers
print("""transformers version:""", transformers.__version__)
except ImportError:
print("""transformers version:""", None) | 86 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
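A worked example of the ideal-gas relation PV = nRT that the helpers above rearrange, using the same gas constant R = 0.0821 L*atm/(mol*K):

R = 0.0821  # L*atm/(mol*K), the constant used in the functions above

# 2 mol of an ideal gas at 300 K in 10 L: P = nRT / V ~= 4.93 atm
pressure = (2 * R * 300) / 10
assert round(pressure, 2) == 4.93

# Solving the same relation back for temperature: T = PV / (nR)
temperature = (pressure * 10) / (R * 2)
assert round(temperature) == 300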
"""simple docstring"""
from typing import List, Union
import numpy as np
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_DEPTH_ESTIMATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
requires_backends(self , 'vision' )
self.check_model_type(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return {}, {}, {}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = load_image(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = image.size
__lowerCAmelCase : Optional[Any] = self.image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
return model_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_outputs.predicted_depth
__lowerCAmelCase : str = torch.nn.functional.interpolate(
predicted_depth.unsqueeze(1 ) , size=self.image_size[::-1] , mode='bicubic' , align_corners=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = prediction.squeeze().cpu().numpy()
__lowerCAmelCase : Dict = (output * 2_55 / np.max(_SCREAMING_SNAKE_CASE )).astype('uint8' )
__lowerCAmelCase : int = Image.fromarray(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {}
__lowerCAmelCase : Tuple = predicted_depth
__lowerCAmelCase : Optional[Any] = depth
return output_dict | 86 |
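A hedged usage sketch of this pipeline via the high-level factory; the model name is a common choice for the task but is an assumption here (requires transformers, torch, and PIL):

from transformers import pipeline

depth_estimator = pipeline("depth-estimation", model="Intel/dpt-large")
result = depth_estimator("http://images.cocodataset.org/val2017/000000039769.jpg")
print(result["predicted_depth"].shape)  # raw per-pixel depth tensor
result["depth"].save("depth.png")       # PIL image, rescaled to 0-255 as in the postprocess above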
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
                        'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
                        " model's max length" )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
                # Remove the padding prompt from the sequence when an XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 1 |
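A hedged usage sketch of the pipeline above; gpt2 is just an example checkpoint, and sampling is enabled so multiple return sequences are allowed:

from transformers import pipeline, set_seed

generator = pipeline("text-generation", model="gpt2")
set_seed(0)
outputs = generator("Hello, I'm a language model,", max_new_tokens=20, do_sample=True, num_return_sequences=2)
for out in outputs:
    print(out["generated_text"])  # ReturnType.FULL_TEXT: prompt + continuation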
"""simple docstring"""
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.txt""", """emoji_file""": """emoji.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt""",
},
"""emoji_file""": {
"""abeja/gpt-neox-japanese-2.7b""": """https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json""",
},
}
lowerCamelCase__ = {
"""abeja/gpt-neox-japanese-2.7b""": 2_048,
}
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = collections.OrderedDict()
__lowerCAmelCase : Dict = collections.OrderedDict()
__lowerCAmelCase : Dict = collections.OrderedDict()
with open(_UpperCamelCase , 'r' , encoding='utf-8' ) as f:
__lowerCAmelCase : Tuple = f.readlines()
__lowerCAmelCase : List[Any] = [[t.rstrip('\n' )] if (t == ',' or ',' not in t) else t.rstrip('\n' ).split(',' ) for t in token]
for idx, b in enumerate(_UpperCamelCase ):
__lowerCAmelCase : Tuple = b
__lowerCAmelCase : Optional[Any] = idx
for wd in b:
__lowerCAmelCase : Any = idx
return vocab, raw_vocab, ids_to_tokens, emoji
class A__ ( _lowerCamelCase):
A_ : str = VOCAB_FILES_NAMES
A_ : str = PRETRAINED_VOCAB_FILES_MAP
A_ : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Any = ['input_ids', 'attention_mask']
    def __init__( self , vocab_file , emoji_file , unk_token="<|endoftext|>" , pad_token="<|endoftext|>" , bos_token="<|startoftext|>" , eos_token="<|endoftext|>" , do_clean_text=False , **kwargs , ):
        super().__init__(
            unk_token=unk_token , pad_token=pad_token , bos_token=bos_token , eos_token=eos_token , do_clean_text=do_clean_text , **kwargs , )
        if not os.path.isfile(vocab_file ):
            raise ValueError(
                f"Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"
                ' model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        if not os.path.isfile(emoji_file ):
            raise ValueError(
                f"Can't find an emoji file at path '{emoji_file}'. To load the emoji information from a Google"
                ' pretrained model use `tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`' )
        self.do_clean_text = do_clean_text
        self.vocab , self.raw_vocab , self.ids_to_tokens , self.emoji = load_vocab_and_emoji(vocab_file , emoji_file )
        self.subword_tokenizer = SubWordJapaneseTokenizer(
            vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
    @property
    def vocab_size( self ):
        # self.vocab contains support for character fluctuation unique to Japanese, and has a large number of vocab
        return len(self.raw_vocab )
    def get_vocab( self ):
        return dict(self.raw_vocab , **self.added_tokens_encoder )
    def _tokenize( self , text ):
        return self.subword_tokenizer.tokenize(text , clean=self.do_clean_text )
    def _convert_token_to_id( self , token ):
        return self.vocab.get(token , self.vocab.get(self.unk_token ) )
    def _convert_id_to_token( self , index ):
        return self.subword_tokenizer.convert_id_to_token(index )
    def convert_tokens_to_string( self , tokens ):
        out_string = ''.join(tokens ).strip()
        return out_string
    def _build_conversation_input_ids( self , conversation ):
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text , add_special_tokens=False ) + [self.eos_token_id] )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
    def save_vocabulary( self , save_directory , filename_prefix = None ):
        index = 0
        if os.path.isdir(save_directory ):
            vocab_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
            emoji_file = os.path.join(
                save_directory , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['emoji_file'] )
        else:
            vocab_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['vocab_file']
            )
            emoji_file = (
                (filename_prefix + '-' if filename_prefix else '') + save_directory + VOCAB_FILES_NAMES['emoji_file']
            )
        with open(vocab_file , 'w' , encoding='utf-8' ) as writer:
            for token_index, token in self.ids_to_tokens.items():
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
                        ' Please check that the vocabulary is not corrupted!' )
                    index = token_index
                writer.write(','.join(token ) + '\n' )
                index += 1
        with open(emoji_file , 'w' , encoding='utf-8' ) as writer:
            json.dump(self.emoji , writer )
        return vocab_file, emoji_file
class SubWordJapaneseTokenizer( object):
    def __init__( self , vocab , ids_to_tokens , emoji ):
        self.vocab = vocab # same as swe
        self.ids_to_tokens = ids_to_tokens # same as bpe
        self.emoji = emoji
        self.maxlen = np.max([len(w ) for w in self.vocab.keys()] )
        self.content_repatter1 = re.compile(R'(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)' )
        self.content_repatter2 = re.compile(R'[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*' )
        self.content_repatter3 = re.compile(R'[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}' )
        self.content_repatter4 = re.compile(
            R'([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter5 = re.compile(
            R'(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*' )
        self.content_repatter6 = re.compile(
            R'((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*' )
        keisen = '─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿'
        blocks = '▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟'
        self.content_trans1 = str.maketrans({k: '<BLOCK>' for k in keisen + blocks} )
def __len__( self ):
return len(self.ids_to_tokens )
    def clean_text( self , content ):
        content = self.content_repatter1.sub('<URL>' , content )
        content = self.content_repatter2.sub('<EMAIL>' , content )
        content = self.content_repatter3.sub('<TEL>' , content )
        content = self.content_repatter4.sub('<DATE>' , content )
        content = self.content_repatter5.sub('<DATE>' , content )
        content = self.content_repatter6.sub('<PRICE>' , content )
        content = content.translate(self.content_trans1 )
        while "<BLOCK><BLOCK>" in content:
            content = content.replace('<BLOCK><BLOCK>' , '<BLOCK>' )
        return content
    def tokenize( self , text , clean=False ):
        text = text.replace('　' , '<SP>' )
        text = text.replace(' ' , '<SP>' )
        text = text.replace('\r\n' , '<BR>' )
        text = text.replace('\n' , '<BR>' )
        text = text.replace('\r' , '<BR>' )
        text = text.replace('\t' , '<TAB>' )
        text = text.replace('—' , 'ー' )
        text = text.replace('−' , 'ー' )
        for k, v in self.emoji["emoji"].items():
            if k in text:
                text = text.replace(k , v )
        if clean:
            text = self.clean_text(text )
        def check_simbol(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 2:
                c = (int(e[0] ) << 8) + int(e[1] )
                if (
                    (c >= 0xc_2_a_1 and c <= 0xc_2_b_f)
                    or (c >= 0xc_7_8_0 and c <= 0xc_7_8_3)
                    or (c >= 0xc_a_b_9 and c <= 0xc_b_b_f)
                    or (c >= 0xc_c_8_0 and c <= 0xc_d_a_2)
                ):
                    return True
            return False
        def checkuae(x ):
            e = x.encode()
            if len(x ) == 1 and len(e ) == 3:
                c = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
                if c >= 0xe_2_8_0_8_0 and c <= 0xe_2_b_0_7_f:
                    return True
            return False
        pos = 0
        result = []
        while pos < len(text ):
            end = min(len(text ) , pos + self.maxlen + 1 ) if text[pos] == '<' else pos + 3
            candidates = [] # (token_id, token, pos)
            for e in range(end , pos , -1 ):
                wd = text[pos:e]
                if wd in self.vocab:
                    if wd[0] == "<" and len(wd ) > 2:
                        candidates = [(self.vocab[wd], wd, e)]
                        break
                    else:
                        candidates.append((self.vocab[wd], wd, e) )
            if len(candidates ) > 0:
                # the smallest token_id is adopted
                _ , wd , e = sorted(candidates , key=lambda x : x[0] )[0]
                result.append(wd )
                pos = e
            else:
                end = pos + 1
                wd = text[pos:end]
                if check_simbol(wd ):
                    result.append('<KIGOU>' )
                elif checkuae(wd ):
                    result.append('<U2000U2BFF>' )
                else:
                    for i in wd.encode('utf-8' ):
                        result.append('<|byte%d|>' % i )
                pos = end
        return result
    def convert_id_to_token( self , index , breakline="\n" ):
        words = []
        byte_tokens = []
        word = self.ids_to_tokens[index][0]
        if word[:6] == "<|byte" and word[-2:] == "|>":
            byte_tokens.append(int(word[6:-2] ) )
        else:
            if len(byte_tokens ) > 0:
                words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
                byte_tokens = []
            if word[:7] == "<|emoji" and word[-2:] == "|>":
                words.append(self.emoji['emoji_inv'][word] )
            elif word == "<SP>":
                words.append('　' )
            elif word == "<BR>":
                words.append(breakline )
            elif word == "<TAB>":
                words.append('\t' )
            elif word == "<BLOCK>":
                words.append('▀' )
            elif word == "<KIGOU>":
                words.append('ǀ' )
            elif word == "<U2000U2BFF>":
                words.append('‖' )
            else:
                words.append(word )
        if len(byte_tokens ) > 0:
            words.append(bytearray(byte_tokens ).decode('utf-8' , errors='replace' ) )
        text = ''.join(words )
        return text
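# --- editorial note --------------------------------------------------------
# A simplified sketch of the longest-match subword loop in
# SubWordJapaneseTokenizer.tokenize above. The real implementation also
# widens the search window for special "<...>" tokens, prefers the smallest
# token id among the collected candidates, and uses <|byte..|> tokens (as
# shown) as a fallback for characters missing from the vocab.
def _greedy_tokenize(text, vocab, maxlen):
    pos, result = 0, []
    while pos < len(text):
        for e in range(min(len(text), pos + maxlen), pos, -1):  # longest first
            wd = text[pos:e]
            if wd in vocab:
                result.append(wd)
                pos = e
                break
        else:  # no match at any length: byte fallback
            result.extend("<|byte%d|>" % b for b in text[pos].encode("utf-8"))
            pos += 1
    return result

assert _greedy_tokenize("abcd", {"ab": 0, "abc": 1, "d": 2}, maxlen=4) == ["abc", "d"]
# ----------------------------------------------------------------------------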
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left (sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo
def bisect_right (sorted_collection , item , lo = 0 , hi = -1 ):
    if hi < 0:
        hi = len(sorted_collection )
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo
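# --- editorial note --------------------------------------------------------
# Quick sanity check of the two insertion points on a run of duplicates:
# bisect_left lands before the equal run, bisect_right one past it,
# matching the stdlib semantics the two functions above reimplement.
assert bisect_left([1, 2, 2, 2, 3], 2) == 1
assert bisect_right([1, 2, 2, 2, 3], 2) == 4
# ----------------------------------------------------------------------------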
def insort_left (sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_left(sorted_collection , item , lo , hi ) , item )
def insort_right (sorted_collection , item , lo = 0 , hi = -1 ):
    sorted_collection.insert(bisect_right(sorted_collection , item , lo , hi ) , item )
def binary_search (sorted_collection , item ):
    left = 0
    right = len(sorted_collection ) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None
def binary_search_std_lib (sorted_collection , item ):
    index = bisect.bisect_left(sorted_collection , item )
    if index != len(sorted_collection ) and sorted_collection[index] == item:
        return index
    return None
def binary_search_by_recursion (sorted_collection , item , left , right ):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection , item , left , midpoint - 1 )
    else:
        return binary_search_by_recursion(sorted_collection , item , midpoint + 1 , right )
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by comma:\n""").strip()
    collection = sorted(int(item) for item in user_input.split(""","""))
    target = int(input("""Enter a single number to be found in the list:\n"""))
    result = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
logger = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def get_new_h_w (h , w , scale_factor=8 ):
    new_h = h // scale_factor**2
    if h % scale_factor**2 != 0:
        new_h += 1
    new_w = w // scale_factor**2
    if w % scale_factor**2 != 0:
        new_w += 1
    return new_h * scale_factor, new_w * scale_factor
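# --- editorial note --------------------------------------------------------
# get_new_h_w above computes ceil(x / scale_factor**2) * scale_factor for
# each dimension: it divides by scale_factor**2 but scales back up by
# scale_factor only, mapping the requested image size to a latent size.
# A compact equivalent using ceiling division:
def _new_dim(x, scale_factor=8):
    return -(-x // scale_factor**2) * scale_factor

assert _new_dim(768) == 96   # 768 // 64 == 12, then 12 * 8
assert _new_dim(769) == 104  # any remainder rounds up: 13 * 8
# ----------------------------------------------------------------------------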
class KandinskyPipeline( DiffusionPipeline):
    def __init__( self , text_encoder , tokenizer , unet , scheduler , movq , ):
        super().__init__()
        self.register_modules(
            text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , scheduler=scheduler , movq=movq , )
        self.movq_scale_factor = 2 ** (len(self.movq.config.block_out_channels ) - 1)
    def prepare_latents( self , shape , dtype , device , generator , latents , scheduler ):
        if latents is None:
            latents = randn_tensor(shape , generator=generator , device=device , dtype=dtype )
        else:
            if latents.shape != shape:
                raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
            latents = latents.to(device )
        latents = latents * scheduler.init_noise_sigma
        return latents
    def _encode_prompt( self , prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt=None , ):
        batch_size = len(prompt ) if isinstance(prompt , list ) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt , padding='max_length' , truncation=True , max_length=77 , return_attention_mask=True , add_special_tokens=True , return_tensors='pt' , )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt , padding='longest' , return_tensors='pt' ).input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids , untruncated_ids ):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
            logger.warning(
                'The following part of your input was truncated because CLIP can only handle sequences up to'
                f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
        text_input_ids = text_input_ids.to(device )
        text_mask = text_inputs.attention_mask.to(device )
        prompt_embeds , text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids , attention_mask=text_mask )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt , dim=0 )
        text_mask = text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
        if do_classifier_free_guidance:
            uncond_tokens : List[str]
            if negative_prompt is None:
                uncond_tokens = [''] * batch_size
            elif type(prompt ) is not type(negative_prompt ):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt )} !="
                    f" {type(prompt )}." )
            elif isinstance(negative_prompt , str ):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt ):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt )}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    ' the batch size of `prompt`.' )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens , padding='max_length' , max_length=77 , truncation=True , return_attention_mask=True , add_special_tokens=True , return_tensors='pt' , )
            uncond_text_input_ids = uncond_input.input_ids.to(device )
            uncond_text_mask = uncond_input.attention_mask.to(device )
            negative_prompt_embeds , uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids , attention_mask=uncond_text_mask )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1 , num_images_per_prompt )
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt , seq_len )
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1 , num_images_per_prompt , 1 )
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt , seq_len , -1 )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt , dim=0 )
            # done duplicates
            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds] )
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
            text_mask = torch.cat([uncond_text_mask, text_mask] )
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError('Please install accelerate via `pip install accelerate`' )
        device = torch.device(f"cuda:{gpu_id}" )
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model , device )
    def enable_model_cpu_offload( self , gpu_id=0 ):
        if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
        device = torch.device(f"cuda:{gpu_id}" )
        if self.device.type != "cpu":
            self.to('cpu' , silence_dtype_warnings=True )
            torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _ , hook = cpu_offload_with_hook(cpu_offloaded_model , device , prev_module_hook=hook )
        if self.safety_checker is not None:
            _ , hook = cpu_offload_with_hook(self.safety_checker , device , prev_module_hook=hook )
        # We'll offload the last model manually.
        self.final_offload_hook = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
                hasattr(module , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING )
    def __call__( self , prompt , image_embeds , negative_image_embeds , negative_prompt = None , height = 5_12 , width = 5_12 , num_inference_steps = 1_00 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds , text_encoder_hidden_states , _ = self._encode_prompt(
            prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt )
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = get_new_h_w(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=text_encoder_hidden_states , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , ).prev_sample
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
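# --- editorial note --------------------------------------------------------
# Classifier-free guidance as applied inside the denoising loop above, in
# isolation (requires torch; the tensors are toy stand-ins for UNet
# outputs): the guided prediction moves away from the unconditional
# estimate, toward the text-conditioned one, scaled by guidance_scale.
import torch as _torch

_uncond = _torch.zeros(1, 4, 8, 8)
_text = _torch.ones(1, 4, 8, 8)
_guidance_scale = 4.0
_guided = _uncond + _guidance_scale * (_text - _uncond)
assert _torch.allclose(_guided, _torch.full((1, 4, 8, 8), 4.0))
# ----------------------------------------------------------------------------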
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class TFPTAutoModelTest( unittest.TestCase):
    @slow
    def test_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModel.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertModel )
            model = AutoModel.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertModel )
    @slow
    def test_model_for_pretraining_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForPreTraining.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForPreTraining )
            model = AutoModelForPreTraining.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForPreTraining )
    @slow
    def test_model_for_causal_lm( self ):
        for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , GPTaConfig )
            model = TFAutoModelForCausalLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFGPTaLMHeadModel )
            model = AutoModelForCausalLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForCausalLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , GPTaLMHeadModel )
    @slow
    def test_lmhead_model_from_pretrained( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelWithLMHead.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelWithLMHead.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
    @slow
    def test_model_for_masked_lm( self ):
        for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForMaskedLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForMaskedLM )
            model = AutoModelForMaskedLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForMaskedLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForMaskedLM )
    @slow
    def test_model_for_encoder_decoder_lm( self ):
        for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , TaConfig )
            model = TFAutoModelForSeqaSeqLM.from_pretrained(model_name , from_pt=True )
            model , loading_info = TFAutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFTaForConditionalGeneration )
            model = AutoModelForSeqaSeqLM.from_pretrained(model_name , from_tf=True )
            model , loading_info = AutoModelForSeqaSeqLM.from_pretrained(
                model_name , output_loading_info=True , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TaForConditionalGeneration )
    @slow
    def test_sequence_classification_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForSequenceClassification.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForSequenceClassification )
            model = AutoModelForSequenceClassification.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForSequenceClassification )
    @slow
    def test_question_answering_model_from_pretrained( self ):
        # for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
        for model_name in ["bert-base-uncased"]:
            config = AutoConfig.from_pretrained(model_name )
            self.assertIsNotNone(config )
            self.assertIsInstance(config , BertConfig )
            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name , from_pt=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , TFBertForQuestionAnswering )
            model = AutoModelForQuestionAnswering.from_pretrained(model_name , from_tf=True )
            self.assertIsNotNone(model )
            self.assertIsInstance(model , BertForQuestionAnswering )
    def test_from_pretrained_identifier( self ):
        # SMALL_MODEL_IDENTIFIER and the Bert classes are restored here from the otherwise unused imports above
        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFBertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , BertForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
    def test_from_identifier_from_model_type( self ):
        # DUMMY_UNKNOWN_IDENTIFIER and the Roberta classes are restored here from the otherwise unused imports above
        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_pt=True )
        self.assertIsInstance(model , TFRobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER , from_tf=True )
        self.assertIsInstance(model , RobertaForMaskedLM )
        self.assertEqual(model.num_parameters() , 1_44_10 )
        self.assertEqual(model.num_parameters(only_trainable=True ) , 1_44_10 )
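# --- editorial note --------------------------------------------------------
# The cross-framework loading pattern these tests exercise, outside
# unittest. A usage sketch only: wrapped in a function because calling it
# downloads checkpoint weights and requires both torch and tensorflow.
def _cross_framework_roundtrip_demo():
    from transformers import AutoModel, TFAutoModel
    tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT checkpoint into a TF model
    pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF checkpoint into a PT model
    return tf_model, pt_model
# ----------------------------------------------------------------------------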
"""simple docstring"""
from typing import Optional
import torch
import torch.utils.checkpoint
from torch import Tensor, nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import (
BackboneOutput,
BaseModelOutputWithNoAttention,
BaseModelOutputWithPoolingAndNoAttention,
ImageClassifierOutputWithNoAttention,
)
from ...modeling_utils import PreTrainedModel
from ...utils import (
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
logging,
replace_return_docstrings,
)
from ...utils.backbone_utils import BackboneMixin
from .configuration_resnet import ResNetConfig
logger = logging.get_logger(__name__)
# General docstring
_CONFIG_FOR_DOC = """ResNetConfig"""
# Base docstring
_CHECKPOINT_FOR_DOC = """microsoft/resnet-50"""
_EXPECTED_OUTPUT_SHAPE = [1, 2_048, 7, 7]
# Image classification docstring
_IMAGE_CLASS_CHECKPOINT = """microsoft/resnet-50"""
_IMAGE_CLASS_EXPECTED_OUTPUT = """tiger cat"""
RESNET_PRETRAINED_MODEL_ARCHIVE_LIST = [
    """microsoft/resnet-50""",
    # See all resnet models at https://huggingface.co/models?filter=resnet
]
class ResNetConvLayer( nn.Module):
    def __init__( self , in_channels , out_channels , kernel_size = 3 , stride = 1 , activation = "relu" ):
        super().__init__()
        self.convolution = nn.Convad(
            in_channels , out_channels , kernel_size=kernel_size , stride=stride , padding=kernel_size // 2 , bias=False )
        self.normalization = nn.BatchNormad(out_channels )
        self.activation = ACTaFN[activation] if activation is not None else nn.Identity()
    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        hidden_state = self.activation(hidden_state )
        return hidden_state
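# --- editorial note --------------------------------------------------------
# ResNetConvLayer above uses padding = kernel_size // 2, which preserves the
# spatial size for odd kernels at stride 1 and halves it at stride 2:
#   out = floor((in + 2*pad - kernel) / stride) + 1
assert (224 + 2 * 3 - 7) // 2 + 1 == 112  # the 7x7, stride-2 stem: 224 -> 112
assert (56 + 2 * 1 - 3) // 1 + 1 == 56    # a 3x3, stride-1 conv keeps 56
# ----------------------------------------------------------------------------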
class ResNetEmbeddings( nn.Module):
    def __init__( self , config ):
        super().__init__()
        self.embedder = ResNetConvLayer(
            config.num_channels , config.embedding_size , kernel_size=7 , stride=2 , activation=config.hidden_act )
        self.pooler = nn.MaxPoolad(kernel_size=3 , stride=2 , padding=1 )
        self.num_channels = config.num_channels
    def forward( self , pixel_values ):
        num_channels = pixel_values.shape[1]
        if num_channels != self.num_channels:
            raise ValueError(
                'Make sure that the channel dimension of the pixel values match with the one set in the configuration.' )
        embedding = self.embedder(pixel_values )
        embedding = self.pooler(embedding )
        return embedding
class ResNetShortCut( nn.Module):
    def __init__( self , in_channels , out_channels , stride = 2 ):
        super().__init__()
        self.convolution = nn.Convad(in_channels , out_channels , kernel_size=1 , stride=stride , bias=False )
        self.normalization = nn.BatchNormad(out_channels )
    def forward( self , input ):
        hidden_state = self.convolution(input )
        hidden_state = self.normalization(hidden_state )
        return hidden_state
class ResNetBasicLayer( nn.Module):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , out_channels , stride=stride ) , ResNetConvLayer(out_channels , out_channels , activation=None ) , )
        self.activation = ACTaFN[activation]
    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
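# --- editorial note --------------------------------------------------------
# The residual identity at the heart of ResNetBasicLayer above, reduced to
# a self-contained toy module: y = relu(F(x) + x). Requires torch; channel
# count and shapes are arbitrary.
import torch as _torch
from torch import nn as _nn

class _TinyResidualBlock(_nn.Module):
    def __init__(self, channels):
        super().__init__()
        self.layer = _nn.Sequential(
            _nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            _nn.BatchNorm2d(channels),
            _nn.ReLU(),
            _nn.Conv2d(channels, channels, 3, padding=1, bias=False),
            _nn.BatchNorm2d(channels),
        )

    def forward(self, x):
        return _torch.relu(self.layer(x) + x)  # identity shortcut

assert _TinyResidualBlock(16)(_torch.randn(1, 16, 8, 8)).shape == (1, 16, 8, 8)
# ----------------------------------------------------------------------------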
class ResNetBottleNeckLayer( nn.Module):
    def __init__( self , in_channels , out_channels , stride = 1 , activation = "relu" , reduction = 4 ):
        super().__init__()
        should_apply_shortcut = in_channels != out_channels or stride != 1
        reduces_channels = out_channels // reduction
        self.shortcut = (
            ResNetShortCut(in_channels , out_channels , stride=stride ) if should_apply_shortcut else nn.Identity()
        )
        self.layer = nn.Sequential(
            ResNetConvLayer(in_channels , reduces_channels , kernel_size=1 ) , ResNetConvLayer(reduces_channels , reduces_channels , stride=stride ) , ResNetConvLayer(reduces_channels , out_channels , kernel_size=1 , activation=None ) , )
        self.activation = ACTaFN[activation]
    def forward( self , hidden_state ):
        residual = hidden_state
        hidden_state = self.layer(hidden_state )
        residual = self.shortcut(residual )
        hidden_state += residual
        hidden_state = self.activation(hidden_state )
        return hidden_state
class ResNetStage( nn.Module):
    def __init__( self , config , in_channels , out_channels , stride = 2 , depth = 2 , ):
        super().__init__()
        layer = ResNetBottleNeckLayer if config.layer_type == 'bottleneck' else ResNetBasicLayer
        self.layers = nn.Sequential(
            # downsampling is done in the first layer with stride of 2
            layer(in_channels , out_channels , stride=stride , activation=config.hidden_act ) , *[layer(out_channels , out_channels , activation=config.hidden_act ) for _ in range(depth - 1 )] , )
    def forward( self , input ):
        hidden_state = input
        for layer in self.layers:
            hidden_state = layer(hidden_state )
        return hidden_state
class ResNetEncoder( nn.Module):
    def __init__( self , config ):
        super().__init__()
        self.stages = nn.ModuleList([] )
        # based on `downsample_in_first_stage` the first layer of the first stage may or may not downsample the input
        self.stages.append(
            ResNetStage(
                config , config.embedding_size , config.hidden_sizes[0] , stride=2 if config.downsample_in_first_stage else 1 , depth=config.depths[0] , ) )
        in_out_channels = zip(config.hidden_sizes , config.hidden_sizes[1:] )
        for (in_channels, out_channels), depth in zip(in_out_channels , config.depths[1:] ):
            self.stages.append(ResNetStage(config , in_channels , out_channels , depth=depth ) )
    def forward( self , hidden_state , output_hidden_states = False , return_dict = True ):
        hidden_states = () if output_hidden_states else None
        for stage_module in self.stages:
            if output_hidden_states:
                hidden_states = hidden_states + (hidden_state,)
            hidden_state = stage_module(hidden_state )
        if output_hidden_states:
            hidden_states = hidden_states + (hidden_state,)
        if not return_dict:
            return tuple(v for v in [hidden_state, hidden_states] if v is not None )
        return BaseModelOutputWithNoAttention(
            last_hidden_state=hidden_state , hidden_states=hidden_states , )
class ResNetPreTrainedModel( PreTrainedModel):
    config_class = ResNetConfig
    base_model_prefix = 'resnet'
    main_input_name = 'pixel_values'
    supports_gradient_checkpointing = True
    def _init_weights( self , module ):
        if isinstance(module , nn.Convad ):
            nn.init.kaiming_normal_(module.weight , mode='fan_out' , nonlinearity='relu' )
        elif isinstance(module , (nn.BatchNormad, nn.GroupNorm) ):
            nn.init.constant_(module.weight , 1 )
            nn.init.constant_(module.bias , 0 )
    def _set_gradient_checkpointing( self , module , value=False ):
        if isinstance(module , ResNetEncoder ):
            module.gradient_checkpointing = value
lowerCamelCase__ = R"""
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and
behavior.
Parameters:
config ([`ResNetConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
lowerCamelCase__ = R"""
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
[`ConvNextImageProcessor.__call__`] for details.
output_hidden_states (`bool`, *optional*):
Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
more detail.
return_dict (`bool`, *optional*):
Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
"""
@add_start_docstrings(
    'The bare ResNet model outputting raw features without any specific head on top.' , RESNET_START_DOCSTRING , )
class ResNetModel( ResNetPreTrainedModel):
    def __init__( self , config ):
        super().__init__(config )
        self.config = config
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        self.pooler = nn.AdaptiveAvgPoolad((1, 1) )
        # Initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_CHECKPOINT_FOR_DOC , output_type=BaseModelOutputWithPoolingAndNoAttention , config_class=_CONFIG_FOR_DOC , modality='vision' , expected_output=_EXPECTED_OUTPUT_SHAPE , )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        embedding_output = self.embedder(pixel_values )
        encoder_outputs = self.encoder(
            embedding_output , output_hidden_states=output_hidden_states , return_dict=return_dict )
        last_hidden_state = encoder_outputs[0]
        pooled_output = self.pooler(last_hidden_state )
        if not return_dict:
            return (last_hidden_state, pooled_output) + encoder_outputs[1:]
        return BaseModelOutputWithPoolingAndNoAttention(
            last_hidden_state=last_hidden_state , pooler_output=pooled_output , hidden_states=encoder_outputs.hidden_states , )
@add_start_docstrings(
    '\n    ResNet Model with an image classification head on top (a linear layer on top of the pooled features), e.g. for\n    ImageNet.\n    ' , RESNET_START_DOCSTRING , )
class ResNetForImageClassification( ResNetPreTrainedModel):
    def __init__( self , config ):
        super().__init__(config )
        self.num_labels = config.num_labels
        self.resnet = ResNetModel(config )
        # classification head
        self.classifier = nn.Sequential(
            nn.Flatten() , nn.Linear(config.hidden_sizes[-1] , config.num_labels ) if config.num_labels > 0 else nn.Identity() , )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @add_code_sample_docstrings(
        checkpoint=_IMAGE_CLASS_CHECKPOINT , output_type=ImageClassifierOutputWithNoAttention , config_class=_CONFIG_FOR_DOC , expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT , )
    def forward( self , pixel_values = None , labels = None , output_hidden_states = None , return_dict = None , ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        outputs = self.resnet(pixel_values , output_hidden_states=output_hidden_states , return_dict=return_dict )
        pooled_output = outputs.pooler_output if return_dict else outputs[1]
        logits = self.classifier(pooled_output )
        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = 'regression'
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = 'single_label_classification'
                else:
                    self.config.problem_type = 'multi_label_classification'
            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(logits.squeeze() , labels.squeeze() )
                else:
                    loss = loss_fct(logits , labels )
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(logits.view(-1 , self.num_labels ) , labels.view(-1 ) )
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(logits , labels )
        if not return_dict:
            output = (logits,) + outputs[2:]
            return (loss,) + output if loss is not None else output
        return ImageClassifierOutputWithNoAttention(loss=loss , logits=logits , hidden_states=outputs.hidden_states )
@add_start_docstrings(
    '\n    ResNet backbone, to be used with frameworks like DETR and MaskFormer.\n    ' , RESNET_START_DOCSTRING , )
class ResNetBackbone( ResNetPreTrainedModel , BackboneMixin):
    def __init__( self , config ):
        super().__init__(config )
        super()._init_backbone(config )
        self.num_features = [config.embedding_size] + config.hidden_sizes
        self.embedder = ResNetEmbeddings(config )
        self.encoder = ResNetEncoder(config )
        # initialize weights and apply final processing
        self.post_init()
    @add_start_docstrings_to_model_forward(RESNET_INPUTS_DOCSTRING )
    @replace_return_docstrings(output_type=BackboneOutput , config_class=_CONFIG_FOR_DOC )
    def forward( self , pixel_values , output_hidden_states = None , return_dict = None ):
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        embedding_output = self.embedder(pixel_values )
        outputs = self.encoder(embedding_output , output_hidden_states=True , return_dict=True )
        hidden_states = outputs.hidden_states
        feature_maps = ()
        for idx, stage in enumerate(self.stage_names ):
            if stage in self.out_features:
                feature_maps += (hidden_states[idx],)
        if not return_dict:
            output = (feature_maps,)
            if output_hidden_states:
                output += (outputs.hidden_states,)
            return output
        return BackboneOutput(
            feature_maps=feature_maps , hidden_states=outputs.hidden_states if output_hidden_states else None , attentions=None , )
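# --- editorial note --------------------------------------------------------
# The problem_type dispatch in ResNetForImageClassification above, in
# isolation (requires torch): long integer labels select single-label
# cross-entropy, while float multi-hot labels select BCE-with-logits; both
# reduce to a scalar loss over the batch.
import torch as _torch
from torch import nn as _nn

_logits = _torch.randn(2, 5)
_ce = _nn.CrossEntropyLoss()(_logits.view(-1, 5), _torch.tensor([1, 3]).view(-1))
_bce = _nn.BCEWithLogitsLoss()(_logits, _torch.randint(0, 2, (2, 5)).float())
assert _ce.dim() == 0 and _bce.dim() == 0  # both reduce to scalar losses
# ----------------------------------------------------------------------------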
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
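# Minimal standalone sketch of the classifier-free guidance step applied in
# __call__ above. The tensor is a toy stand-in for a real UNet output on a
# doubled batch: the unconditional half comes first, the text-conditioned
# half second.
import torch

noise_pred = torch.randn(2, 4, 8, 8)  # stand-in for a UNet prediction on a doubled batch
guidance_scale = 4.0
noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
guided = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
assert guided.shape == (1, 4, 8, 8)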
"""simple docstring"""
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
lowerCamelCase__ = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
lowerCamelCase__ = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
lowerCamelCase__ = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
    predictions: list of generated text to score. Each prediction
        should be a string with tokens separated by spaces.
    references: list of references, one for each prediction. Each
        reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
    featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
    device_id: device for featurization. Supply a GPU id (e.g. 0 or 3) to use the GPU; if no GPU with this id is found, the CPU is used.
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def __lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage='https://github.com/krishnap25/mauve' , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' , id='sequence' ),
'references': datasets.Value('string' , id='sequence' ),
} ) , codebase_urls=['https://github.com/krishnap25/mauve'] , reference_urls=[
'https://arxiv.org/abs/2102.01454',
'https://github.com/krishnap25/mauve',
] , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE="auto" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=0.9 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=5_00 , _SCREAMING_SNAKE_CASE="gpt2-large" , _SCREAMING_SNAKE_CASE=-1 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=25 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=25 , ):
__lowerCAmelCase : str = compute_mauve(
p_text=_SCREAMING_SNAKE_CASE , q_text=_SCREAMING_SNAKE_CASE , p_features=_SCREAMING_SNAKE_CASE , q_features=_SCREAMING_SNAKE_CASE , p_tokens=_SCREAMING_SNAKE_CASE , q_tokens=_SCREAMING_SNAKE_CASE , num_buckets=_SCREAMING_SNAKE_CASE , pca_max_data=_SCREAMING_SNAKE_CASE , kmeans_explained_var=_SCREAMING_SNAKE_CASE , kmeans_num_redo=_SCREAMING_SNAKE_CASE , kmeans_max_iter=_SCREAMING_SNAKE_CASE , featurize_model_name=_SCREAMING_SNAKE_CASE , device_id=_SCREAMING_SNAKE_CASE , max_text_length=_SCREAMING_SNAKE_CASE , divergence_curve_discretization_size=_SCREAMING_SNAKE_CASE , mauve_scaling_factor=_SCREAMING_SNAKE_CASE , verbose=_SCREAMING_SNAKE_CASE , seed=_SCREAMING_SNAKE_CASE , )
return out | 86 |
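# Illustrative end-to-end use of the metric wrapper above via the `datasets`
# API. The strings are placeholders; the heavy lifting (GPT-2 featurization,
# k-means quantization, divergence-frontier integration) happens inside
# compute_mauve, so this is slow without a GPU.
import datasets

mauve = datasets.load_metric('mauve')
predictions = ['the quick brown fox jumps over the lazy dog']
references = ['a quick brown fox leapt over a sleepy dog']
out = mauve.compute(predictions=predictions, references=references)
print(out.mauve)  # in (0, 1]; higher means the two text distributions are closer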
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 1 |
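# Minimal standalone sketch of the tokenizer exercised by the tests above,
# using the same public `moussaKam/mbarthez` checkpoint (downloads on first use):
from transformers import BarthezTokenizerFast

tok = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
batch = tok(['A long paragraph for summarization.'], return_tensors='pt')
print(batch.input_ids.shape)  # (1, sequence_length), with <s>/</s> specials added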
"""simple docstring"""
import math
import sys
def minimum_squares_to_represent_a_number(number):
    """
    Count the minimum number of perfect squares that sum to `number`.

    >>> minimum_squares_to_represent_a_number(25)
    1
    >>> minimum_squares_to_represent_a_number(37)
    2
    """
    if number != int(number):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        answer = sys.maxsize
        root = int(math.sqrt(i) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
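# Illustrative trace (not part of the original file): for number = 12 the DP
# table fills as answers = [0, 1, 2, 3, 1, 2, 3, 4, 2, 1, 2, 3, 3], so three
# squares suffice (12 = 4 + 4 + 4). Lagrange's four-square theorem guarantees
# the answer never exceeds 4 for any natural number.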
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3 | 86 | 1 |
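# Minimal illustrative usage, assuming the mangled class names above map to
# the public `PoolFormerConfig` / `PoolFormerOnnxConfig` from transformers:
from transformers import PoolFormerConfig

config = PoolFormerConfig()
print(config.hidden_sizes)  # [64, 128, 320, 512] with the defaults above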
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class A__ ( _lowerCamelCase):
@staticmethod
def __lowerCamelCase ( _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' , type=_SCREAMING_SNAKE_CASE , default=_SCREAMING_SNAKE_CASE , help='Path to location to store the models' )
download_parser.add_argument(
'--force' , action='store_true' , help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' , action='store_true' , help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' , )
download_parser.add_argument('model' , type=_SCREAMING_SNAKE_CASE , help='Name of the model to download' )
        download_parser.set_defaults(func=download_command_factory )
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = model
__lowerCAmelCase : int = cache
__lowerCAmelCase : List[Any] = force
__lowerCAmelCase : Any = trust_remote_code
def __lowerCamelCase ( self ):
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model , cache_dir=self._cache , force_download=self._force , trust_remote_code=self._trust_remote_code ) | 86 |
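# What `transformers-cli download <model>` does under the hood, written out
# directly (this mirrors the `run` method above; the model name below is just
# an example, not something prescribed by the command):
from transformers import AutoModel, AutoTokenizer

AutoModel.from_pretrained('bert-base-uncased', cache_dir=None, force_download=False)
AutoTokenizer.from_pretrained('bert-base-uncased', cache_dir=None, force_download=False)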
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
"""simple docstring"""
from ..utils import DummyObject, requires_backends
class A__ ( metaclass=_lowerCamelCase):
A_ : Tuple = ['transformers', 'torch', 'note_seq']
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(self , ['transformers', 'torch', 'note_seq'] )
@classmethod
def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] )
@classmethod
def __lowerCamelCase ( cls , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
requires_backends(cls , ['transformers', 'torch', 'note_seq'] ) | 86 |
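# Self-contained sketch of the placeholder pattern above: a dummy class whose
# constructor raises immediately with a helpful message when the optional
# backends are missing (a toy check stands in for the real `requires_backends`).
class _DummyBackendObject:
    _backends = ['transformers', 'torch', 'note_seq']

    def __init__(self, *args, **kwargs):
        raise ImportError(f"This object requires {self._backends} to be installed.")

# _DummyBackendObject()  # -> ImportError listing the required backends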
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class A__ ( _lowerCamelCase):
A_ : Dict = 'bart'
A_ : Dict = ['past_key_values']
A_ : Union[str, Any] = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
def __init__( self , _SCREAMING_SNAKE_CASE=5_02_65 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=40_96 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=1 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = vocab_size
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Union[str, Any] = d_model
__lowerCAmelCase : int = encoder_ffn_dim
__lowerCAmelCase : List[Any] = encoder_layers
__lowerCAmelCase : str = encoder_attention_heads
__lowerCAmelCase : Optional[Any] = decoder_ffn_dim
__lowerCAmelCase : str = decoder_layers
__lowerCAmelCase : Any = decoder_attention_heads
__lowerCAmelCase : Optional[Any] = dropout
__lowerCAmelCase : Optional[Any] = attention_dropout
__lowerCAmelCase : Tuple = activation_dropout
__lowerCAmelCase : int = activation_function
__lowerCAmelCase : Union[str, Any] = init_std
__lowerCAmelCase : Any = encoder_layerdrop
__lowerCAmelCase : Any = decoder_layerdrop
__lowerCAmelCase : Optional[int] = classifier_dropout
__lowerCAmelCase : Optional[Any] = use_cache
__lowerCAmelCase : int = encoder_layers
__lowerCAmelCase : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=_SCREAMING_SNAKE_CASE , pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , is_encoder_decoder=_SCREAMING_SNAKE_CASE , decoder_start_token_id=_SCREAMING_SNAKE_CASE , forced_eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get('force_bos_token_to_be_generated' , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.bos_token_id
warnings.warn(
f"Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. "
'The config can simply be saved and uploaded again to be fixed.' )
class A__ ( _lowerCamelCase):
@property
def __lowerCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase : int = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCAmelCase : Dict = {0: 'batch'}
__lowerCAmelCase : Optional[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__lowerCAmelCase : List[str] = {0: 'batch', 1: 'decoder_sequence'}
__lowerCAmelCase : str = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_SCREAMING_SNAKE_CASE , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__lowerCAmelCase : int = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase : Any = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
__lowerCAmelCase : Tuple = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__lowerCAmelCase : List[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCamelCase ( self ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase : List[str] = super().outputs
else:
__lowerCAmelCase : Optional[Any] = super(_SCREAMING_SNAKE_CASE , self ).outputs
if self.use_past:
__lowerCAmelCase , __lowerCAmelCase : int = self.num_layers
for i in range(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__lowerCAmelCase : List[str] = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : str = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# Generate decoder inputs
__lowerCAmelCase : Optional[int] = seq_length if not self.use_past else 1
__lowerCAmelCase : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {f"decoder_{name}": tensor for name, tensor in decoder_inputs.items()}
__lowerCAmelCase : List[Any] = dict(**_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : Dict = common_inputs['input_ids'].shape
__lowerCAmelCase : Optional[int] = common_inputs['decoder_input_ids'].shape[1]
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.num_attention_heads
__lowerCAmelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase : Any = decoder_seq_length + 3
__lowerCAmelCase : List[str] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__lowerCAmelCase : Any = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] , dim=1 )
__lowerCAmelCase : Optional[Any] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.num_layers
__lowerCAmelCase : List[str] = min(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) - min_num_layers
__lowerCAmelCase : Optional[Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append(
(
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
torch.zeros(_SCREAMING_SNAKE_CASE ),
) )
# TODO: test this.
__lowerCAmelCase : int = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
common_inputs["past_key_values"].append((torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) )
return common_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__lowerCAmelCase , __lowerCAmelCase : List[str] = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__lowerCAmelCase : int = seqlen + 2
__lowerCAmelCase , __lowerCAmelCase : Dict = self.num_layers
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.num_attention_heads
__lowerCAmelCase : Optional[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__lowerCAmelCase : List[str] = common_inputs['attention_mask'].dtype
__lowerCAmelCase : Any = torch.cat(
[common_inputs['attention_mask'], torch.ones(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )] , dim=1 )
__lowerCAmelCase : int = [
(torch.zeros(_SCREAMING_SNAKE_CASE ), torch.zeros(_SCREAMING_SNAKE_CASE )) for _ in range(_SCREAMING_SNAKE_CASE )
]
return common_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__lowerCAmelCase : Tuple = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__lowerCAmelCase : List[Any] = tokenizer.num_special_tokens_to_add(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = compute_effective_axis_dimension(
_SCREAMING_SNAKE_CASE , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_SCREAMING_SNAKE_CASE )
# Generate dummy inputs according to compute batch and sequence
__lowerCAmelCase : int = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__lowerCAmelCase : List[str] = dict(tokenizer(_SCREAMING_SNAKE_CASE , return_tensors=_SCREAMING_SNAKE_CASE ) )
return common_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = -1 , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = None , ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
elif self.task == "causal-lm":
__lowerCAmelCase : List[Any] = self._generate_dummy_inputs_for_causal_lm(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : int = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_SCREAMING_SNAKE_CASE , batch_size=_SCREAMING_SNAKE_CASE , seq_length=_SCREAMING_SNAKE_CASE , is_pair=_SCREAMING_SNAKE_CASE , framework=_SCREAMING_SNAKE_CASE )
return common_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if self.task in ["default", "seq2seq-lm"]:
__lowerCAmelCase : Tuple = super()._flatten_past_key_values_(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : Union[str, Any] = super(_SCREAMING_SNAKE_CASE , self )._flatten_past_key_values_(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
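# Quick shape check (illustrative numbers only) for the past_key_values
# dummies built above: each layer caches key/value tensors of shape
# (batch, num_heads, past_sequence_length, hidden_size // num_heads).
import torch

batch, num_heads, past_len, hidden_size = 2, 16, 8, 1024
cache_shape = (batch, num_heads, past_len, hidden_size // num_heads)
assert torch.zeros(cache_shape).shape == (2, 16, 8, 64)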
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
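# Toy sketch of the lazy-import pattern used above: attribute access triggers
# the real submodule import on demand. This is a simplified stand-in for
# transformers' `_LazyModule`, not its actual implementation.
import importlib
import types


class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')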
"""simple docstring"""
import math_equivalence # From: git+https://github.com/hendrycks/math.git
import datasets
lowerCamelCase__ = """\
@article{hendrycksmath2021,
title={Measuring Mathematical Problem Solving With the MATH Dataset},
author={Dan Hendrycks
and Collin Burns
and Saurav Kadavath
and Akul Arora
and Steven Basart
and Eric Tang
and Dawn Song
and Jacob Steinhardt},
journal={arXiv preprint arXiv:2103.03874},
year={2021}
}
"""
lowerCamelCase__ = """\
This metric is used to assess performance on the Mathematics Aptitude Test of Heuristics (MATH) dataset.
It first canonicalizes the inputs (e.g., converting \"1/2\" to \"\\frac{1}{2}\") and then computes accuracy.
"""
lowerCamelCase__ = R"""
Calculates accuracy after canonicalizing inputs.
Args:
predictions: list of predictions to score. Each prediction
is a string that contains natural language and LaTex.
references: list of reference for each prediction. Each
reference is a string that contains natural language
and LaTex.
Returns:
accuracy: accuracy after canonicalizing inputs
(e.g., converting \"1/2\" to \"\\frac{1}{2}\")
Examples:
>>> metric = datasets.load_metric(\"competition_math\")
>>> results = metric.compute(references=[\"\\frac{1}{2}\"], predictions=[\"1/2\"])
>>> print(results)
{'accuracy': 1.0}
"""
@datasets.utils.file_utils.add_end_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION)
class A__ ( datasets.Metric):
def __lowerCamelCase ( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Value('string' ),
'references': datasets.Value('string' ),
} ) , homepage='https://github.com/hendrycks/math' , codebase_urls=['https://github.com/hendrycks/math'] , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = 0.0
for i, j in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
n_correct += 1.0 if math_equivalence.is_equiv(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 0.0
__lowerCAmelCase : Any = n_correct / len(_SCREAMING_SNAKE_CASE )
return {
"accuracy": accuracy,
} | 86 |
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import argparse
import gc
import json
import os
import re
import torch
from huggingface_hub import hf_hub_download
from transformers import AutoModelForCausalLM, AutoTokenizer, PreTrainedTokenizerFast, RwkvConfig
from transformers.modeling_utils import WEIGHTS_INDEX_NAME, shard_checkpoint
lowerCamelCase__ = {
"""169M""": 12,
"""430M""": 24,
"""1B5""": 24,
"""3B""": 32,
"""7B""": 32,
"""14B""": 40,
}
lowerCamelCase__ = {
"""169M""": 768,
"""430M""": 1_024,
"""1B5""": 2_048,
"""3B""": 2_560,
"""7B""": 4_096,
"""14B""": 5_120,
}
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = list(state_dict.keys() )
for name in state_dict_keys:
__lowerCAmelCase : Any = state_dict.pop(_UpperCamelCase )
# emb -> embedding
if name.startswith('emb.' ):
__lowerCAmelCase : Optional[int] = name.replace('emb.' , 'embeddings.' )
# ln_0 -> pre_ln (only present at block 0)
if name.startswith('blocks.0.ln0' ):
__lowerCAmelCase : str = name.replace('blocks.0.ln0' , 'blocks.0.pre_ln' )
# att -> attention
__lowerCAmelCase : Optional[Any] = re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , _UpperCamelCase )
# ffn -> feed_forward
__lowerCAmelCase : Optional[int] = re.sub(r'blocks\.(\d+)\.ffn' , r'blocks.\1.feed_forward' , _UpperCamelCase )
# time_mix_k -> time_mix_key and reshape
if name.endswith('.time_mix_k' ):
__lowerCAmelCase : Tuple = name.replace('.time_mix_k' , '.time_mix_key' )
# time_mix_v -> time_mix_value and reshape
if name.endswith('.time_mix_v' ):
__lowerCAmelCase : Tuple = name.replace('.time_mix_v' , '.time_mix_value' )
# time_mix_r -> time_mix_key and reshape
if name.endswith('.time_mix_r' ):
__lowerCAmelCase : List[str] = name.replace('.time_mix_r' , '.time_mix_receptance' )
if name != "head.weight":
__lowerCAmelCase : Dict = 'rwkv.' + name
__lowerCAmelCase : int = weight
return state_dict
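# Illustration (hypothetical key, real patterns from the function above): the
# regex renames remap raw RWKV checkpoint keys onto the Hugging Face module
# tree, e.g.
#   re.sub(r'blocks\.(\d+)\.att' , r'blocks.\1.attention' , 'blocks.3.att.key.weight')
#   returns 'blocks.3.attention.key.weight', and 'emb.weight' ends up as
#   'rwkv.embeddings.weight' after the 'rwkv.' prefix step.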
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None , _UpperCamelCase=None , _UpperCamelCase=False , _UpperCamelCase=None ):
# 1. If possible, build the tokenizer.
if tokenizer_file is None:
print('No `--tokenizer_file` provided, we will use the default tokenizer.' )
__lowerCAmelCase : Tuple = 5_0277
__lowerCAmelCase : Union[str, Any] = AutoTokenizer.from_pretrained('EleutherAI/gpt-neox-20b' )
else:
__lowerCAmelCase : Dict = PreTrainedTokenizerFast(tokenizer_file=_UpperCamelCase )
__lowerCAmelCase : int = len(_UpperCamelCase )
tokenizer.save_pretrained(_UpperCamelCase )
# 2. Build the config
__lowerCAmelCase : Optional[int] = list(NUM_HIDDEN_LAYERS_MAPPING.keys() )
if size is None:
# Try to infer size from the checkpoint name
for candidate in possible_sizes:
if candidate in checkpoint_file:
__lowerCAmelCase : str = candidate
break
if size is None:
raise ValueError('Could not infer the size, please provide it with the `--size` argument.' )
if size not in possible_sizes:
raise ValueError(F"`size` should be one of {possible_sizes}, got {size}." )
__lowerCAmelCase : Optional[int] = RwkvConfig(
        vocab_size=_UpperCamelCase , num_hidden_layers=NUM_HIDDEN_LAYERS_MAPPING[size] , hidden_size=HIDDEN_SIZE_MAPPING[size] , )
config.save_pretrained(_UpperCamelCase )
# 3. Download model file then convert state_dict
__lowerCAmelCase : Union[str, Any] = hf_hub_download(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Tuple = torch.load(_UpperCamelCase , map_location='cpu' )
__lowerCAmelCase : List[Any] = convert_state_dict(_UpperCamelCase )
# 4. Split in shards and save
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = shard_checkpoint(_UpperCamelCase )
for shard_file, shard in shards.items():
torch.save(_UpperCamelCase , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
if index is not None:
__lowerCAmelCase : Dict = os.path.join(_UpperCamelCase , _UpperCamelCase )
# Save the index as well
with open(_UpperCamelCase , 'w' , encoding='utf-8' ) as f:
__lowerCAmelCase : Optional[int] = json.dumps(_UpperCamelCase , indent=2 , sort_keys=_UpperCamelCase ) + '\n'
f.write(_UpperCamelCase )
    # 5. Clean up shards (for some reason the files PyTorch saves take the same space as the whole state_dict)
    print(
        'Cleaning up shards. This may fail with an OOM error; if this is the case, don\'t worry, you still have converted the model.' )
__lowerCAmelCase : List[str] = list(shards.keys() )
del state_dict
del shards
gc.collect()
for shard_file in shard_files:
__lowerCAmelCase : Union[str, Any] = torch.load(os.path.join(_UpperCamelCase , _UpperCamelCase ) )
torch.save({k: v.cpu().clone() for k, v in state_dict.items()} , os.path.join(_UpperCamelCase , _UpperCamelCase ) )
del state_dict
gc.collect()
if push_to_hub:
if model_name is None:
raise ValueError('Please provide a `model_name` to push the model to the Hub.' )
__lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(_UpperCamelCase )
model.push_to_hub(_UpperCamelCase , max_shard_size='2GB' )
tokenizer.push_to_hub(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--repo_id""", default=None, type=str, required=True, help="""Repo ID from which to pull the checkpoint."""
)
parser.add_argument(
"""--checkpoint_file""", default=None, type=str, required=True, help="""Name of the checkpoint file in the repo."""
)
parser.add_argument(
"""--output_dir""", default=None, type=str, required=True, help="""Where to save the converted model."""
)
parser.add_argument(
"""--tokenizer_file""",
default=None,
type=str,
help="""Path to the tokenizer file to use (if not provided, only the model is converted).""",
)
parser.add_argument(
"""--size""",
default=None,
type=str,
help="""Size of the model. Will be inferred from the `checkpoint_file` if not passed.""",
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Push to the Hub the converted model.""",
)
parser.add_argument(
"""--model_name""",
default=None,
type=str,
help="""Name of the pushed model on the Hub, including the username / organization.""",
)
lowerCamelCase__ = parser.parse_args()
    convert_rwkv_checkpoint_to_hf_format(
args.repo_id,
args.checkpoint_file,
args.output_dir,
size=args.size,
tokenizer_file=args.tokenizer_file,
push_to_hub=args.push_to_hub,
model_name=args.model_name,
) | 86 |
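# Usage sketch (illustrative only; the script, repo and checkpoint file names
# below are hypothetical, not taken from the original code):
#   python convert_rwkv_checkpoint_to_hf.py \
#       --repo_id BlinkDL/rwkv-4-pile-169m \
#       --checkpoint_file RWKV-4-Pile-169M.pth \
#       --output_dir ./rwkv-169m-hf \
#       --size 169M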
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
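# Note on the pattern exercised by the two checks above (simplified sketch):
# token-by-token decoding with a key/value cache must match one full forward
# pass. Roughly:
#   full = model(input_ids) # no cache
#   step = model(input_ids[:, :-1] , past_key_values=model.init_cache(...) , ...)
#   last = model(input_ids[:, -1:] , past_key_values=step.past_key_values , ...)
# after which max |last[0][:, -1] - full[0][:, -1]| is asserted to be < 1e-3.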
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE ) | 86 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase = 10**9 ):
    # Project Euler 94: sum the perimeters of every "almost equilateral" triangle
    # (integer sides a, a, a +/- 1 with an integer area) whose perimeter is at
    # most max_perimeter, stepping through the solution family by recurrence
    # instead of searching over triangles.
__lowerCAmelCase : Union[str, Any] = 1
__lowerCAmelCase : Dict = 2
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : int = 0
__lowerCAmelCase : Dict = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
__lowerCAmelCase : int = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
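# Cross-check (derived from the recurrence above, assuming the obfuscated
# assignments bind prev_value = 1 and value = 2): the generated perimeters are
# 16 (triangle 5-5-6), 50 (17-17-16), 196 (65-65-66), 722, 2704, ..., so with
# max_perimeter = 1_000 the function returns 16 + 50 + 196 + 722 = 984.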
if __name__ == "__main__":
print(f'{solution() = }') | 86 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
        __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 | 1 |
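# Usage sketch (mirrors the integration test above; the checkpoint name and
# expected output shape are taken directly from that test):
#   model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
#   output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0] # shape (1, 6, 768)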
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
lowerCamelCase__ = {
"""vocab_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"""
},
"""merges_file""": {
"""facebook/blenderbot_small-90M""": """https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"""
},
"""tokenizer_config_file""": {
"""facebook/blenderbot_small-90M""": (
"""https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"""
)
},
}
lowerCamelCase__ = {"""facebook/blenderbot_small-90M""": 512}
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = set()
__lowerCAmelCase : int = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
__lowerCAmelCase : Tuple = char
__lowerCAmelCase : List[str] = set(_UpperCamelCase )
return pairs
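# Illustration: for the symbol sequence ('h', 'e', 'l', 'l', 'o</w>') the
# function above returns {('h', 'e'), ('e', 'l'), ('l', 'l'), ('l', 'o</w>')},
# i.e. the candidate merges that the BPE loop below repeatedly collapses,
# always taking the pair with the lowest rank in self.bpe_ranks first.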
class A__ ( _lowerCamelCase):
A_ : Dict = VOCAB_FILES_NAMES
A_ : Union[str, Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Dict = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Tuple = ['input_ids', 'attention_mask']
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="__start__" , _SCREAMING_SNAKE_CASE="__end__" , _SCREAMING_SNAKE_CASE="__unk__" , _SCREAMING_SNAKE_CASE="__null__" , **_SCREAMING_SNAKE_CASE , ):
super().__init__(unk_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as vocab_handle:
__lowerCAmelCase : Optional[Any] = json.load(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {v: k for k, v in self.encoder.items()}
with open(_SCREAMING_SNAKE_CASE , encoding='utf-8' ) as merges_handle:
__lowerCAmelCase : Tuple = merges_handle.read().split('\n' )[1:-1]
__lowerCAmelCase : Union[str, Any] = [tuple(merge.split() ) for merge in merges]
__lowerCAmelCase : Union[str, Any] = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Any = {}
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if token in self.cache:
return self.cache[token]
__lowerCAmelCase : Tuple = re.sub('([.,!?()])' , R' \1' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.sub('(\')' , R' \1 ' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.sub(R'\s{2,}' , ' ' , _SCREAMING_SNAKE_CASE )
if "\n" in token:
__lowerCAmelCase : Optional[int] = token.replace('\n' , ' __newln__' )
__lowerCAmelCase : Optional[int] = token.split(' ' )
__lowerCAmelCase : Optional[Any] = []
for token in tokens:
if not len(_SCREAMING_SNAKE_CASE ):
continue
__lowerCAmelCase : Optional[int] = token.lower()
__lowerCAmelCase : List[Any] = tuple(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
__lowerCAmelCase : List[str] = get_pairs(_SCREAMING_SNAKE_CASE )
if not pairs:
words.append(_SCREAMING_SNAKE_CASE )
continue
while True:
__lowerCAmelCase : Tuple = min(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : self.bpe_ranks.get(_SCREAMING_SNAKE_CASE , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__lowerCAmelCase , __lowerCAmelCase : List[Any] = bigram
__lowerCAmelCase : List[str] = []
__lowerCAmelCase : str = 0
while i < len(_SCREAMING_SNAKE_CASE ):
try:
__lowerCAmelCase : int = word.index(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
new_word.extend(word[i:j] )
__lowerCAmelCase : str = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(_SCREAMING_SNAKE_CASE ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__lowerCAmelCase : Tuple = tuple(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = new_word
if len(_SCREAMING_SNAKE_CASE ) == 1:
break
else:
__lowerCAmelCase : int = get_pairs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = '@@ '.join(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = word[:-4]
__lowerCAmelCase : Dict = word
words.append(_SCREAMING_SNAKE_CASE )
return " ".join(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Optional[int] = re.findall(R'\S+\n?' , _SCREAMING_SNAKE_CASE )
for token in words:
split_tokens.extend(list(self.bpe(_SCREAMING_SNAKE_CASE ).split(' ' ) ) )
return split_tokens
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = token.lower()
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = ' '.join(_SCREAMING_SNAKE_CASE ).replace('@@ ' , '' ).strip()
return out_string
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not os.path.isdir(_SCREAMING_SNAKE_CASE ):
logger.error(f"Vocabulary path ({save_directory}) should be a directory" )
return
__lowerCAmelCase : Tuple = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : Optional[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_SCREAMING_SNAKE_CASE , ensure_ascii=_SCREAMING_SNAKE_CASE ) + '\n' )
__lowerCAmelCase : Union[str, Any] = 0
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
' Please check that the tokenizer is not corrupted!' )
__lowerCAmelCase : Dict = token_index
writer.write(' '.join(_SCREAMING_SNAKE_CASE ) + '\n' )
index += 1
return vocab_file, merge_file | 86 |
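# Usage sketch (illustrative; the class above appears to be the BlenderbotSmall
# tokenizer, inferred from the pretrained-file URLs; the exact public class
# name is an assumption):
#   tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot_small-90M')
#   tok.tokenize('hello') # lower-cased BPE pieces, continuations marked with '@@'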
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            __lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda item : item[0] )[0]
            __lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda item : item[1] )[1]
return expected_height, expected_width
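# Worked example for get_expected_values above: with size = {'shortest_edge': 18},
# a 40 x 30 (h x w) image has w < h, so the width becomes 18 and the height
# becomes int(18 * 40 / 30) = 24: the shortest edge is resized to 18 while the
# aspect ratio is preserved.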
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
"""simple docstring"""
import argparse
from pathlib import Path
import requests
import torch
from PIL import Image
from transformers import (
RobertaTokenizer,
TrOCRConfig,
TrOCRForCausalLM,
TrOCRProcessor,
VisionEncoderDecoderModel,
ViTConfig,
ViTImageProcessor,
ViTModel,
)
from transformers.utils import logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = []
for i in range(encoder_config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm1.weight", F"encoder.encoder.layer.{i}.layernorm_before.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm1.bias", F"encoder.encoder.layer.{i}.layernorm_before.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.weight", F"encoder.encoder.layer.{i}.attention.output.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.attn.proj.bias", F"encoder.encoder.layer.{i}.attention.output.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.norm2.weight", F"encoder.encoder.layer.{i}.layernorm_after.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.norm2.bias", F"encoder.encoder.layer.{i}.layernorm_after.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.weight", F"encoder.encoder.layer.{i}.intermediate.dense.weight") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc1.bias", F"encoder.encoder.layer.{i}.intermediate.dense.bias") )
rename_keys.append(
(F"encoder.deit.blocks.{i}.mlp.fc2.weight", F"encoder.encoder.layer.{i}.output.dense.weight") )
rename_keys.append((F"encoder.deit.blocks.{i}.mlp.fc2.bias", F"encoder.encoder.layer.{i}.output.dense.bias") )
# cls token, position embeddings and patch embeddings of encoder
rename_keys.extend(
[
('encoder.deit.cls_token', 'encoder.embeddings.cls_token'),
('encoder.deit.pos_embed', 'encoder.embeddings.position_embeddings'),
('encoder.deit.patch_embed.proj.weight', 'encoder.embeddings.patch_embeddings.projection.weight'),
('encoder.deit.patch_embed.proj.bias', 'encoder.embeddings.patch_embeddings.projection.bias'),
('encoder.deit.norm.weight', 'encoder.layernorm.weight'),
('encoder.deit.norm.bias', 'encoder.layernorm.bias'),
] )
return rename_keys
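# Illustration: the rename table built above maps, for example,
#   'encoder.deit.blocks.0.norm1.weight' -> 'encoder.encoder.layer.0.layernorm_before.weight'
#   'encoder.deit.cls_token' -> 'encoder.embeddings.cls_token'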
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
for i in range(encoder_config.num_hidden_layers ):
# queries, keys and values (only weights, no biases)
__lowerCAmelCase : Any = state_dict.pop(F"encoder.deit.blocks.{i}.attn.qkv.weight" )
__lowerCAmelCase : Union[str, Any] = in_proj_weight[
: encoder_config.hidden_size, :
]
__lowerCAmelCase : Optional[Any] = in_proj_weight[
encoder_config.hidden_size : encoder_config.hidden_size * 2, :
]
__lowerCAmelCase : int = in_proj_weight[
-encoder_config.hidden_size :, :
]
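# Sanity check of the fused-QKV split performed above, on a dummy tensor: a
# (3 * hidden, hidden) projection is sliced row-wise into three (hidden, hidden)
# blocks for query, key and value. The size used here is hypothetical.
_hidden = 4
_fused = torch.arange(3 * _hidden * _hidden, dtype=torch.float32).reshape(3 * _hidden, _hidden)
assert _fused[:_hidden, :].shape == (_hidden, _hidden)  # query rows
assert _fused[_hidden : _hidden * 2, :].shape == (_hidden, _hidden)  # key rows
assert _fused[-_hidden:, :].shape == (_hidden, _hidden)  # value rows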
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = dct.pop(_UpperCamelCase )
__lowerCAmelCase : List[Any] = val
def __lowerCAmelCase (_UpperCamelCase ):
if "handwritten" in checkpoint_url:
__lowerCAmelCase : Union[str, Any] = 'https://fki.tic.heia-fr.ch/static/img/a01-122-02-00.jpg' # industry
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-12.jpg" # have
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02-10.jpg" # let
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122-02.jpg" #
# url = "https://fki.tic.heia-fr.ch/static/img/a01-122.jpg"
elif "printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCAmelCase : int = 'https://www.researchgate.net/profile/Dinh-Sang/publication/338099565/figure/fig8/AS:840413229350922@1577381536857/An-receipt-example-in-the-SROIE-2019-dataset_Q640.jpg'
__lowerCAmelCase : Dict = Image.open(requests.get(_UpperCamelCase , stream=_UpperCamelCase ).raw ).convert('RGB' )
return im
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = ViTConfig(image_size=384 , qkv_bias=_UpperCamelCase )
__lowerCAmelCase : List[Any] = TrOCRConfig()
# size of the architecture
if "base" in checkpoint_url:
__lowerCAmelCase : Dict = 768
elif "large" in checkpoint_url:
# use ViT-large encoder
__lowerCAmelCase : Optional[Any] = 1024
__lowerCAmelCase : List[Any] = 4096
__lowerCAmelCase : Optional[int] = 24
__lowerCAmelCase : Optional[Any] = 16
__lowerCAmelCase : int = 1024
else:
raise ValueError('Should either find \'base\' or \'large\' in checkpoint URL' )
# the large-printed + stage1 checkpoints use sinusoidal position embeddings and no layernorm afterwards
if "large-printed" in checkpoint_url or "stage1" in checkpoint_url:
__lowerCAmelCase : str = False
__lowerCAmelCase : Optional[Any] = 'relu'
__lowerCAmelCase : Dict = 1024
__lowerCAmelCase : str = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : int = False
# load HuggingFace model
__lowerCAmelCase : Any = ViTModel(_UpperCamelCase , add_pooling_layer=_UpperCamelCase )
__lowerCAmelCase : str = TrOCRForCausalLM(_UpperCamelCase )
__lowerCAmelCase : Tuple = VisionEncoderDecoderModel(encoder=_UpperCamelCase , decoder=_UpperCamelCase )
model.eval()
# load state_dict of original model, rename some keys
__lowerCAmelCase : Any = torch.hub.load_state_dict_from_url(_UpperCamelCase , map_location='cpu' , check_hash=_UpperCamelCase )['model']
__lowerCAmelCase : Optional[int] = create_rename_keys(_UpperCamelCase , _UpperCamelCase )
for src, dest in rename_keys:
rename_key(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
read_in_q_k_v(_UpperCamelCase , _UpperCamelCase )
# remove parameters we don't need
del state_dict["encoder.deit.head.weight"]
del state_dict["encoder.deit.head.bias"]
del state_dict["decoder.version"]
# add prefix to decoder keys
for key, val in state_dict.copy().items():
__lowerCAmelCase : Optional[Any] = state_dict.pop(_UpperCamelCase )
if key.startswith('decoder' ) and "output_projection" not in key:
__lowerCAmelCase : Tuple = val
else:
__lowerCAmelCase : Optional[Any] = val
# load state dict
model.load_state_dict(_UpperCamelCase )
# Check outputs on an image
__lowerCAmelCase : Dict = ViTImageProcessor(size=encoder_config.image_size )
__lowerCAmelCase : Dict = RobertaTokenizer.from_pretrained('roberta-large' )
__lowerCAmelCase : Optional[Any] = TrOCRProcessor(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : Optional[Any] = processor(images=prepare_img(_UpperCamelCase ) , return_tensors='pt' ).pixel_values
# verify logits
__lowerCAmelCase : Any = torch.tensor([[model.config.decoder.decoder_start_token_id]] )
__lowerCAmelCase : Tuple = model(pixel_values=_UpperCamelCase , decoder_input_ids=_UpperCamelCase )
__lowerCAmelCase : Optional[int] = outputs.logits
__lowerCAmelCase : Tuple = torch.Size([1, 1, 5_0265] )
if "trocr-base-handwritten" in checkpoint_url:
__lowerCAmelCase : int = torch.tensor(
[-1.4502, -4.6683, -0.5347, -2.9291, 9.1435, -3.0571, 8.9764, 1.7560, 8.7358, -1.5311] )
elif "trocr-large-handwritten" in checkpoint_url:
__lowerCAmelCase : Optional[Any] = torch.tensor(
[-2.6437, -1.3129, -2.2596, -5.3455, 6.3539, 1.7604, 5.4991, 1.4702, 5.6113, 2.0170] )
elif "trocr-base-printed" in checkpoint_url:
__lowerCAmelCase : Optional[Any] = torch.tensor(
[-5.6816, -5.8388, 1.1398, -6.9034, 6.8505, -2.4393, 1.2284, -1.0232, -1.9661, -3.9210] )
elif "trocr-large-printed" in checkpoint_url:
__lowerCAmelCase : List[Any] = torch.tensor(
[-6.0162, -7.0959, 4.4155, -5.1063, 7.0468, -3.1631, 2.6466, -0.3081, -0.8106, -1.7535] )
if "stage1" not in checkpoint_url:
assert logits.shape == expected_shape, "Shape of logits not as expected"
assert torch.allclose(logits[0, 0, :10] , _UpperCamelCase , atol=1e-3 ), "First elements of logits not as expected"
Path(_UpperCamelCase ).mkdir(exist_ok=_UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
print(F"Saving processor to {pytorch_dump_folder_path}" )
processor.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument(
"""--checkpoint_url""",
default="""https://layoutlm.blob.core.windows.net/trocr/model_zoo/fairseq/trocr-base-handwritten.pt""",
type=str,
help="""URL to the original PyTorch checkpoint (.pth file).""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the folder to output PyTorch model."""
)
lowerCamelCase__ = parser.parse_args()
convert_tr_ocr_checkpoint(args.checkpoint_url, args.pytorch_dump_folder_path) | 86 |
"""simple docstring"""
import numpy as np
def __lowerCAmelCase (_UpperCamelCase ):
return 1 / (1 + np.exp(-vector ))
def __lowerCAmelCase (_UpperCamelCase ):
return vector * sigmoid(_UpperCamelCase )
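# Quick numeric sanity check of the two activations above (sigmoid and the
# SiLU/swish x * sigmoid(x)), rewritten with plainly named locals:
_x = np.array([-1.0, 0.0, 1.0])
_sig = 1 / (1 + np.exp(-_x))
assert np.isclose(_sig[1], 0.5)  # sigmoid(0) == 0.5
assert np.isclose((_x * _sig)[1], 0.0)  # swish(0) == 0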
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, BlipaProcessor, BlipImageProcessor, GPTaTokenizer, PreTrainedTokenizerFast
@require_vision
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = tempfile.mkdtemp()
__lowerCAmelCase : Any = BlipImageProcessor()
__lowerCAmelCase : Union[str, Any] = GPTaTokenizer.from_pretrained('hf-internal-testing/tiny-random-GPT2Model' )
__lowerCAmelCase : List[str] = BlipaProcessor(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
processor.save_pretrained(self.tmpdirname )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).tokenizer
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return AutoProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE ).image_processor
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase : str = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = BlipaProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : int = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCAmelCase : Tuple = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
__lowerCAmelCase : Optional[int] = BlipaProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_SCREAMING_SNAKE_CASE , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : str = self.get_tokenizer()
__lowerCAmelCase : Union[str, Any] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = self.prepare_image_inputs()
__lowerCAmelCase : Dict = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : Tuple = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.get_image_processor()
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
__lowerCAmelCase : List[str] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = 'lower newer'
__lowerCAmelCase : str = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = tokenizer(_SCREAMING_SNAKE_CASE , return_token_type_ids=_SCREAMING_SNAKE_CASE )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : int = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = 'lower newer'
__lowerCAmelCase : Optional[Any] = self.prepare_image_inputs()
__lowerCAmelCase : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : List[Any] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_image_processor()
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
__lowerCAmelCase : int = BlipaProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = 'lower newer'
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : Dict = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
# For now the processor supports only ['pixel_values', 'input_ids', 'attention_mask']
self.assertListEqual(list(inputs.keys() ) , ['pixel_values', 'input_ids', 'attention_mask'] ) | 86 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[Any] = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% relative difference on a value of 10e8
# amounts to an absolute difference of ~1, so comparing with an absolute tolerance is not a good idea.
# Here, we instead divide the expected result by the actual result, which should give ~1, and check that
# the ratio stays within bounds: 1 - TOLERANCE <= expected_result / result <= 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
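# Standalone illustration of the ratio-based comparison used in the test above:
# with values near 1e8 an absolute tolerance is useless, but the ratio
# expected/actual stays within 1 +/- TOLERANCE for a tiny relative error.
# A self-contained sketch; the numbers are made up.
import torch

_expected = torch.tensor([1e8, 2e8])
_actual = _expected * (1 + 1e-6)  # 0.0001% relative error
_ratio = _expected / _actual
assert bool(torch.all(_ratio >= 1 - 1e-3)) and bool(torch.all(_ratio <= 1 + 1e-3))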
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
def __lowerCAmelCase ():
__lowerCAmelCase : dict[int, int] = {}
__lowerCAmelCase : List[str] = 2
while True:
__lowerCAmelCase : List[Any] = factor_map.pop(_UpperCamelCase , _UpperCamelCase )
if factor:
__lowerCAmelCase : Optional[Any] = factor + prime
while x in factor_map:
x += factor
__lowerCAmelCase : str = factor
else:
__lowerCAmelCase : List[Any] = prime
yield prime
prime += 1
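# Why comparing `2 * prime * n` against the limit suffices below: for odd n the
# binomial expansion gives (p - 1)**n + (p + 1)**n == 2*p*n (mod p**2), since
# every other surviving term carries a factor of p**2. Quick check for p=5, n=3:
assert ((5 - 1) ** 3 + (5 + 1) ** 3) % 5**2 == (2 * 5 * 3) % 5**2 == 5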
def __lowerCAmelCase (_UpperCamelCase = 1e1_0 ):
__lowerCAmelCase : Dict = sieve()
__lowerCAmelCase : Optional[Any] = 1
while True:
__lowerCAmelCase : int = next(_UpperCamelCase )
if (2 * prime * n) > limit:
return n
# Ignore the next prime, as the remainder for even n is only 2.
next(_UpperCamelCase )
n += 2
if __name__ == "__main__":
print(solution()) | 86 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
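# A minimal, self-contained sketch of the token-to-JSON parsing implemented by
# the method above, for a flat sequence with a single field (the input string
# here is made up):
import re

_tokens = "<s_total>1,000</s_total>"
_start = re.search(r"<s_(.*?)>", _tokens, re.IGNORECASE)
_key = _start.group(1)
_content = re.search(
    rf"{re.escape(_start.group())}(.*?){re.escape(f'</s_{_key}>')}", _tokens, re.IGNORECASE
)
assert {_key: _content.group(1).strip()} == {"total": "1,000"}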
"""simple docstring"""
lowerCamelCase__ = 65_521
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[Any] = 1
__lowerCAmelCase : Union[str, Any] = 0
for plain_chr in plain_text:
__lowerCAmelCase : int = (a + ord(_UpperCamelCase )) % MOD_ADLER
__lowerCAmelCase : List[str] = (b + a) % MOD_ADLER
return (b << 16) | a | 86 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(_UpperCamelCase )
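# Sanity check using the worked example from the problem statement: the
# numerator of the 10th convergent of e is 1457, whose digits sum to 17.
# Re-deriving it with plainly named locals and the same coefficient rule:
_num, _prev = 2, 1
for _i in range(2, 11):
    _num, _prev = (2 * _i // 3 if _i % 3 == 0 else 1) * _num + _prev, _num
assert _num == 1457 and sum(int(_d) for _d in str(_num)) == 17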
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
"""simple docstring"""
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import ResNetConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFResNetForImageClassification, TFResNetModel
from transformers.models.resnet.modeling_tf_resnet import TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=[10, 20, 30, 40] , _SCREAMING_SNAKE_CASE=[1, 1, 2, 1] , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE="relu" , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : Dict = image_size
__lowerCAmelCase : Union[str, Any] = num_channels
__lowerCAmelCase : List[Any] = embeddings_size
__lowerCAmelCase : Tuple = hidden_sizes
__lowerCAmelCase : Dict = depths
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Any = use_labels
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : Any = num_labels
__lowerCAmelCase : int = scope
__lowerCAmelCase : List[Any] = len(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__lowerCAmelCase : List[Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.num_labels )
__lowerCAmelCase : Optional[Any] = self.get_config()
return config, pixel_values, labels
def __lowerCamelCase ( self ):
return ResNetConfig(
num_channels=self.num_channels , embeddings_size=self.embeddings_size , hidden_sizes=self.hidden_sizes , depths=self.depths , hidden_act=self.hidden_act , num_labels=self.num_labels , image_size=self.image_size , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = TFResNetModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : Tuple = TFResNetForImageClassification(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = config_and_inputs
__lowerCAmelCase : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (TFResNetModel, TFResNetForImageClassification) if is_tf_available() else ()
A_ : Dict = (
{'feature-extraction': TFResNetModel, 'image-classification': TFResNetForImageClassification}
if is_tf_available()
else {}
)
A_ : Any = False
A_ : int = False
A_ : Optional[int] = False
A_ : int = False
A_ : str = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFResNetModelTester(self )
__lowerCAmelCase : Union[str, Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , has_text_modality=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
return
@unittest.skip(reason='ResNet does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='ResNet does not support input and output embeddings' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Dict = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
def check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[Any] = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__lowerCAmelCase : Union[str, Any] = self.model_tester.num_stages
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , expected_num_stages + 1 )
# ResNet's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [self.model_tester.image_size // 4, self.model_tester.image_size // 4] , )
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : int = ['basic', 'bottleneck']
for model_class in self.all_model_classes:
for layer_type in layers_type:
__lowerCAmelCase : Dict = layer_type
__lowerCAmelCase : Dict = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : int = True
check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = TFResNetModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase ():
__lowerCAmelCase : Optional[Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_tf
@require_vision
class A__ ( unittest.TestCase):
@cached_property
def __lowerCamelCase ( self ):
return (
AutoImageProcessor.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
if is_vision_available()
else None
)
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = TFResNetForImageClassification.from_pretrained(TF_RESNET_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
__lowerCAmelCase : Tuple = self.default_image_processor
__lowerCAmelCase : int = prepare_img()
__lowerCAmelCase : List[str] = image_processor(images=_SCREAMING_SNAKE_CASE , return_tensors='tf' )
# forward pass
__lowerCAmelCase : Any = model(**_SCREAMING_SNAKE_CASE )
# verify the logits
__lowerCAmelCase : Optional[Any] = tf.TensorShape((1, 10_00) )
self.assertEqual(outputs.logits.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = tf.constant([-11.1069, -9.7877, -8.3777] )
self.assertTrue(np.allclose(outputs.logits[0, :3].numpy() , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) | 86 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
"""simple docstring"""
import argparse
import pathlib
import fairseq
import torch
from fairseq.models.roberta import RobertaModel as FairseqRobertaModel
from fairseq.modules import TransformerSentenceEncoderLayer
from packaging import version
from transformers import XLMRobertaConfig, XLMRobertaXLForMaskedLM, XLMRobertaXLForSequenceClassification
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.models.roberta.modeling_roberta import RobertaAttention
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse("""1.0.0a"""):
raise Exception("""requires fairseq >= 1.0.0a""")
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = """Hello world! cécé herlolip"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = FairseqRobertaModel.from_pretrained(_UpperCamelCase )
roberta.eval() # disable dropout
__lowerCAmelCase : Any = roberta.model.encoder.sentence_encoder
__lowerCAmelCase : Optional[Any] = XLMRobertaConfig(
vocab_size=roberta_sent_encoder.embed_tokens.num_embeddings , hidden_size=roberta.cfg.model.encoder_embed_dim , num_hidden_layers=roberta.cfg.model.encoder_layers , num_attention_heads=roberta.cfg.model.encoder_attention_heads , intermediate_size=roberta.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , )
if classification_head:
__lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'].out_proj.weight.shape[0]
print('Our RoBERTa config:' , _UpperCamelCase )
__lowerCAmelCase : Tuple = XLMRobertaXLForSequenceClassification(_UpperCamelCase ) if classification_head else XLMRobertaXLForMaskedLM(_UpperCamelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
__lowerCAmelCase : Optional[Any] = roberta_sent_encoder.embed_tokens.weight
__lowerCAmelCase : str = roberta_sent_encoder.embed_positions.weight
__lowerCAmelCase : int = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c RoBERTa doesn't use them.
__lowerCAmelCase : int = roberta_sent_encoder.layer_norm.weight
__lowerCAmelCase : Optional[int] = roberta_sent_encoder.layer_norm.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
__lowerCAmelCase : BertLayer = model.roberta.encoder.layer[i]
__lowerCAmelCase : TransformerSentenceEncoderLayer = roberta_sent_encoder.layers[i]
__lowerCAmelCase : RobertaAttention = layer.attention
__lowerCAmelCase : int = roberta_layer.self_attn_layer_norm.weight
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn_layer_norm.bias
# self attention
__lowerCAmelCase : BertSelfAttention = layer.attention.self
assert (
roberta_layer.self_attn.k_proj.weight.data.shape
== roberta_layer.self_attn.q_proj.weight.data.shape
== roberta_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
)
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.q_proj.weight
__lowerCAmelCase : Dict = roberta_layer.self_attn.q_proj.bias
__lowerCAmelCase : int = roberta_layer.self_attn.k_proj.weight
__lowerCAmelCase : Any = roberta_layer.self_attn.k_proj.bias
__lowerCAmelCase : Optional[int] = roberta_layer.self_attn.v_proj.weight
__lowerCAmelCase : Optional[Any] = roberta_layer.self_attn.v_proj.bias
# self-attention output
__lowerCAmelCase : BertSelfOutput = layer.attention.output
assert self_output.dense.weight.shape == roberta_layer.self_attn.out_proj.weight.shape
__lowerCAmelCase : Union[str, Any] = roberta_layer.self_attn.out_proj.weight
__lowerCAmelCase : Optional[int] = roberta_layer.self_attn.out_proj.bias
# this one is final layer norm
__lowerCAmelCase : str = roberta_layer.final_layer_norm.weight
__lowerCAmelCase : str = roberta_layer.final_layer_norm.bias
# intermediate
__lowerCAmelCase : BertIntermediate = layer.intermediate
assert intermediate.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase : str = roberta_layer.fca.weight
__lowerCAmelCase : Optional[int] = roberta_layer.fca.bias
# output
__lowerCAmelCase : BertOutput = layer.output
assert bert_output.dense.weight.shape == roberta_layer.fca.weight.shape
__lowerCAmelCase : Tuple = roberta_layer.fca.weight
__lowerCAmelCase : Union[str, Any] = roberta_layer.fca.bias
# end of layer
if classification_head:
__lowerCAmelCase : List[str] = roberta.model.classification_heads['mnli'].dense.weight
__lowerCAmelCase : Tuple = roberta.model.classification_heads['mnli'].dense.bias
__lowerCAmelCase : List[Any] = roberta.model.classification_heads['mnli'].out_proj.weight
__lowerCAmelCase : Optional[int] = roberta.model.classification_heads['mnli'].out_proj.bias
else:
# LM Head
__lowerCAmelCase : int = roberta.model.encoder.lm_head.dense.weight
__lowerCAmelCase : int = roberta.model.encoder.lm_head.dense.bias
__lowerCAmelCase : Optional[int] = roberta.model.encoder.lm_head.layer_norm.weight
__lowerCAmelCase : List[Any] = roberta.model.encoder.lm_head.layer_norm.bias
__lowerCAmelCase : Any = roberta.model.encoder.lm_head.weight
__lowerCAmelCase : List[str] = roberta.model.encoder.lm_head.bias
# Let's check that we get the same results.
__lowerCAmelCase : torch.Tensor = roberta.encode(_UpperCamelCase ).unsqueeze(0 ) # batch of size 1
__lowerCAmelCase : Any = model(_UpperCamelCase )[0]
if classification_head:
__lowerCAmelCase : Optional[Any] = roberta.model.classification_heads['mnli'](roberta.extract_features(_UpperCamelCase ) )
else:
__lowerCAmelCase : List[Any] = roberta.model(_UpperCamelCase )[0]
print(our_output.shape , their_output.shape )
__lowerCAmelCase : List[str] = torch.max(torch.abs(our_output - their_output ) ).item()
print(F"max_absolute_diff = {max_absolute_diff}" ) # ~ 1e-7
__lowerCAmelCase : Optional[Any] = torch.allclose(_UpperCamelCase , _UpperCamelCase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
pathlib.Path(_UpperCamelCase ).mkdir(parents=_UpperCamelCase , exist_ok=_UpperCamelCase )
print(F"Saving model to {pytorch_dump_folder_path}" )
model.save_pretrained(_UpperCamelCase )
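# The two checks above are complementary: the max absolute difference reports the
# worst-case element-wise deviation, and `torch.allclose` turns it into a
# pass/fail at a chosen tolerance. A tiny self-contained illustration:
_a = torch.tensor([1.0, 2.0, 3.0])
_b = _a + torch.tensor([0.0, 5e-4, -5e-4])
assert torch.max(torch.abs(_a - _b)).item() < 1e-3
assert torch.allclose(_a, _b, atol=1e-3)
assert not torch.allclose(_a, _b, atol=1e-6, rtol=0.0)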
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--roberta_checkpoint_path""", default=None, type=str, required=True, help="""Path the official PyTorch dump."""
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--classification_head""", action="""store_true""", help="""Whether to convert a final classification head."""
)
lowerCamelCase__ = parser.parse_args()
convert_xlm_roberta_xl_checkpoint_to_pytorch(
args.roberta_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 86 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create symlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
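# Name parsing used by load_conv_layer, shown on a hypothetical fairseq key:
# layer_id selects the conv block, type_id distinguishes conv (0) from norm (2).
_full_name = "feature_extractor.conv_layers.0.2.weight"
_items = _full_name.split("conv_layers.")[-1].split(".")
print(int(_items[0]), int(_items[1]))  # 0 2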
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
    # load the pre-trained checkpoint
    checkpoint = torch.load(checkpoint_path)
    cfg = WavLMConfigOrig(checkpoint["cfg"])
    model = WavLMOrig(cfg)
    model.load_state_dict(checkpoint["model"])
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path)
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config)
    recursively_load_weights(model, hf_wavlm)
    hf_wavlm.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
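# Example invocation of the converter (script name and paths are placeholders):
#   python convert_wavlm_checkpoint.py \
#       --checkpoint_path /path/to/WavLM-Base.pt \
#       --pytorch_dump_folder_path ./wavlm-base-converted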
"""simple docstring"""
def cramers_rule_2x2(equation1, equation2):
    # Check if the input is valid
    if not len(equation1) == len(equation2) == 3:
        raise ValueError("Please enter a valid equation.")
    if equation1[0] == equation1[1] == equation2[0] == equation2[1] == 0:
        raise ValueError("Both a & b of two equations can't be zero.")
    # Extract the coefficients of a1*x + b1*y = c1 and a2*x + b2*y = c2
    a1, b1, c1 = equation1
    a2, b2, c2 = equation2
    # Calculate the determinants of the matrices
    determinant = a1 * b2 - a2 * b1
    determinant_x = c1 * b2 - c2 * b1
    determinant_y = a1 * c2 - a2 * c1
    # Check if the system of linear equations has a solution (using Cramer's rule)
    if determinant == 0:
        if determinant_x == determinant_y == 0:
            raise ValueError("Infinite solutions. (Consistent system)")
        else:
            raise ValueError("No solution. (Inconsistent system)")
    else:
        if determinant_x == determinant_y == 0:
            # Trivial solution: both unknowns are zero (consistent system)
            return (0.0, 0.0)
        else:
            x = determinant_x / determinant
            y = determinant_y / determinant
            # Non-trivial solution (consistent system)
return (x, y) | 86 |
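# Quick check of the solver: 2x + 3y = 7 and x - y = 1 give x = 2, y = 1.
print(cramers_rule_2x2([2, 3, 7], [1, -1, 1]))  # (2.0, 1.0)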
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : str = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
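# Sketch (hypothetical test class) of how the class-scoped fixture is consumed:
# pytest attaches the environment to the class, so tests read self.env.
# @pytest.mark.usefixtures("sm_env")
# class TestSingleNodeTraining:
#     framework = "pytorch"
#
#     def test_defaults(self):
#         assert self.env.hyperparameters["task_name"] == "mnli"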
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class TFCamembertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_output_embeds_base_model(self):
        model = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")

        input_ids = tf.convert_to_tensor(
            [[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]],
            dtype=tf.int32,
        )  # J'aime le camembert ! ("I love camembert!")

        output = model(input_ids)["last_hidden_state"]
        expected_shape = tf.TensorShape((1, 10, 768))
        self.assertEqual(output.shape, expected_shape)
        # compare the actual values for a slice.
        expected_slice = tf.convert_to_tensor(
            [[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]],
            dtype=tf.float32,
        )
        # camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
        # camembert.eval()
        # expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 86 |
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]

grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__(self, pos_x, pos_y, goal_x, goal_y, g_cost, parent):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()

    def calculate_heuristic(self) -> float:
        """Manhattan distance from this cell to the goal."""
        dx = abs(self.pos_x - self.goal_x)
        dy = abs(self.pos_y - self.goal_y)
        return dx + dy

    def __lt__(self, other):
        return self.f_cost < other.f_cost


class GreedyBestFirst:
    def __init__(self, start, goal):
        self.start = Node(start[1], start[0], goal[1], goal[0], 0, None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], 99999, None)
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False

    def search(self):
        while self.open_nodes:
            # Open nodes are sorted using __lt__, i.e. purely by heuristic cost
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            self.closed_nodes.append(current_node)
            successors = self.get_successors(current_node)
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node)
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node))
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node)
                    else:
                        self.open_nodes.append(better_node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent):
        """Return the reachable, obstacle-free neighbours of `parent`."""
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x,
                    pos_y,
                    self.target.pos_y,
                    self.target.pos_x,
                    parent.g_cost + 1,
                    parent,
                )
            )
        return successors

    def retrace_path(self, node):
        """Retrace the path from `node` back to the start node."""
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
lowerCamelCase__ = (0, 0)
lowerCamelCase__ = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
lowerCamelCase__ = GreedyBestFirst(init, goal)
lowerCamelCase__ = greedy_bf.search()
if path:
for pos_x, pos_y in path:
lowerCamelCase__ = 2
for elem in grid:
print(elem) | 86 | 1 |
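# Note: this is greedy best-first search, not A* -- nodes are ordered purely by
# the Manhattan heuristic. A minimal A* variant of Node (illustration only)
# would fold the accumulated path cost into the ordering key:
class AStarNode(Node):
    def __init__(self, *args):
        super().__init__(*args)
        self.f_cost = self.g_cost + self.calculate_heuristic()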
"""simple docstring"""
import numpy as np
def runge_kutta(f, ya, xa, x_end, h):
    """Classic fourth-order Runge-Kutta integration of y' = f(x, y), y(xa) = ya."""
    n = int(np.ceil((x_end - xa) / h))
    y = np.zeros((n + 1,))
    y[0] = ya
    x = xa
    for k in range(n):
        k1 = f(x, y[k])
        k2 = f(x + 0.5 * h, y[k] + 0.5 * h * k1)
        k3 = f(x + 0.5 * h, y[k] + 0.5 * h * k2)
        k4 = f(x + h, y[k] + h * k3)
        y[k + 1] = y[k] + (1 / 6) * h * (k1 + 2 * k2 + 2 * k3 + k4)
        x += h
    return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 |
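# Sanity check: integrating y' = y from y(0) = 1 over [0, 1] should land near
# e ~= 2.71828 for a small step size.
if __name__ == "__main__":
    ys = runge_kutta(lambda x, y: y, 1.0, 0.0, 1.0, 0.01)
    print(ys[-1])  # ~2.7182818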
"""simple docstring"""
# NOTE: function and parameter names below are reconstructed from the formula
# bodies (ideal gas law with R = 0.0821 L*atm/(mol*K)); the original names were
# stripped from this excerpt.
def molarity_to_normality(nfactor, moles, volume):
    return round(float(moles / volume) * nfactor)


def pressure_of_gas_system(moles, temperature, volume):
    return round(float((moles * 0.0821 * temperature) / (volume)))


def volume_of_gas_system(moles, temperature, pressure):
    return round(float((moles * 0.0821 * temperature) / (pressure)))


def temperature_of_gas_system(moles, volume, pressure):
    return round(float((pressure * volume) / (0.0821 * moles)))
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
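# Example with the reconstructed names above: 1 mol at 300 K in a 10 L vessel is
# about 2 atm after rounding, since P = nRT / V.
print(pressure_of_gas_system(1, 300, 10))  # round(1 * 0.0821 * 300 / 10) -> 2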
"""simple docstring"""
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    """Return the index of the first Fibonacci term to contain n digits."""
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 86 |
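# Example: the first Fibonacci term with 3 digits is F(12) = 144, so:
print(solution(3))  # 12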
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation the number of desired tokens exceeds the'
' models max length' )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Remove PADDING prompt of the sequence if XLNet or Transfo-XL model is used
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 1 |
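# Typical end-user entry point for the class above (model name is only an
# example): the pipeline() factory builds and wires up a text-generation
# pipeline like the one implemented here.
from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
print(generator("Hello, I'm a language model,", max_new_tokens=20)[0]["generated_text"])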
"""simple docstring"""
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A__ ( _lowerCamelCase , _lowerCamelCase , _lowerCamelCase):
A_ : List[str] = [R'h\.\d+\.attn\.bias', R'h\.\d+\.attn\.masked_bias']
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_02_57 , _SCREAMING_SNAKE_CASE = 10_24 , _SCREAMING_SNAKE_CASE = 7_68 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = 12 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "gelu_new" , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 0.1 , _SCREAMING_SNAKE_CASE = 1E-5 , _SCREAMING_SNAKE_CASE = 0.02 , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = True , _SCREAMING_SNAKE_CASE = False , _SCREAMING_SNAKE_CASE = False , ):
super().__init__()
__lowerCAmelCase : Dict = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f"`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and"
f" `n_embd`: {n_embd} are not equal." )
__lowerCAmelCase : int = prefix_inner_dim
__lowerCAmelCase : Optional[Any] = prefix_hidden_dim
__lowerCAmelCase : Union[str, Any] = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
__lowerCAmelCase : Union[str, Any] = (
nn.Linear(self.prefix_hidden_dim , _SCREAMING_SNAKE_CASE ) if self.prefix_hidden_dim is not None else nn.Identity()
)
__lowerCAmelCase : Dict = GPTaConfig(
vocab_size=_SCREAMING_SNAKE_CASE , n_positions=_SCREAMING_SNAKE_CASE , n_embd=_SCREAMING_SNAKE_CASE , n_layer=_SCREAMING_SNAKE_CASE , n_head=_SCREAMING_SNAKE_CASE , n_inner=_SCREAMING_SNAKE_CASE , activation_function=_SCREAMING_SNAKE_CASE , resid_pdrop=_SCREAMING_SNAKE_CASE , embd_pdrop=_SCREAMING_SNAKE_CASE , attn_pdrop=_SCREAMING_SNAKE_CASE , layer_norm_epsilon=_SCREAMING_SNAKE_CASE , initializer_range=_SCREAMING_SNAKE_CASE , scale_attn_weights=_SCREAMING_SNAKE_CASE , use_cache=_SCREAMING_SNAKE_CASE , scale_attn_by_inverse_layer_idx=_SCREAMING_SNAKE_CASE , reorder_and_upcast_attn=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = GPTaLMHeadModel(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : Optional[Any] = self.transformer.transformer.wte(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.encode_prefix(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self.decode_prefix(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
__lowerCAmelCase : Dict = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
__lowerCAmelCase : Tuple = torch.cat((dummy_token, input_ids) , dim=1 )
__lowerCAmelCase : List[Any] = self.transformer(inputs_embeds=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return torch.zeros(_SCREAMING_SNAKE_CASE , self.prefix_length , dtype=torch.intaa , device=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.encode_prefix(_SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = torch.split(_SCREAMING_SNAKE_CASE , 1 , dim=0 )
__lowerCAmelCase : Dict = []
__lowerCAmelCase : Dict = []
for feature in features:
__lowerCAmelCase : Union[str, Any] = self.decode_prefix(feature.to(_SCREAMING_SNAKE_CASE ) ) # back to the clip feature
# Only support beam search for now
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.generate_beam(
input_embeds=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
__lowerCAmelCase : List[str] = torch.stack(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.stack(_SCREAMING_SNAKE_CASE )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE = 5 , _SCREAMING_SNAKE_CASE = 67 , _SCREAMING_SNAKE_CASE = 1.0 , _SCREAMING_SNAKE_CASE = None , ):
__lowerCAmelCase : List[Any] = eos_token_id
__lowerCAmelCase : List[Any] = None
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : Optional[int] = torch.ones(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=torch.int )
__lowerCAmelCase : Union[str, Any] = torch.zeros(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=torch.bool )
if input_embeds is not None:
__lowerCAmelCase : Union[str, Any] = input_embeds
else:
__lowerCAmelCase : str = self.transformer.transformer.wte(_SCREAMING_SNAKE_CASE )
for i in range(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.transformer(inputs_embeds=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = outputs.logits
__lowerCAmelCase : Union[str, Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
__lowerCAmelCase : Tuple = logits.softmax(-1 ).log()
if scores is None:
__lowerCAmelCase , __lowerCAmelCase : Tuple = logits.topk(_SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Dict = generated.expand(_SCREAMING_SNAKE_CASE , *generated.shape[1:] )
__lowerCAmelCase , __lowerCAmelCase : int = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
__lowerCAmelCase : Tuple = next_tokens
else:
__lowerCAmelCase : Dict = tokens.expand(_SCREAMING_SNAKE_CASE , *tokens.shape[1:] )
__lowerCAmelCase : List[str] = torch.cat((tokens, next_tokens) , dim=1 )
else:
__lowerCAmelCase : List[str] = -float(np.inf )
__lowerCAmelCase : Union[str, Any] = 0
__lowerCAmelCase : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
__lowerCAmelCase : Optional[Any] = scores_sum / seq_lengths[:, None]
__lowerCAmelCase , __lowerCAmelCase : List[Any] = scores_sum_average.view(-1 ).topk(_SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : List[Any] = next_tokens // scores_sum.shape[1]
__lowerCAmelCase : List[Any] = seq_lengths[next_tokens_source]
__lowerCAmelCase : Any = next_tokens % scores_sum.shape[1]
__lowerCAmelCase : Tuple = next_tokens.unsqueeze(1 )
__lowerCAmelCase : str = tokens[next_tokens_source]
__lowerCAmelCase : str = torch.cat((tokens, next_tokens) , dim=1 )
__lowerCAmelCase : List[str] = generated[next_tokens_source]
__lowerCAmelCase : Union[str, Any] = scores_sum_average * seq_lengths
__lowerCAmelCase : int = is_stopped[next_tokens_source]
__lowerCAmelCase : Optional[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
__lowerCAmelCase : int = torch.cat((generated, next_token_embed) , dim=1 )
__lowerCAmelCase : List[str] = is_stopped + next_tokens.eq(_SCREAMING_SNAKE_CASE ).squeeze()
if is_stopped.all():
break
__lowerCAmelCase : Optional[int] = scores / seq_lengths
__lowerCAmelCase : int = scores.argsort(descending=_SCREAMING_SNAKE_CASE )
# tokens tensors are already padded to max_seq_length
__lowerCAmelCase : Union[str, Any] = [tokens[i] for i in order]
__lowerCAmelCase : Union[str, Any] = torch.stack(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[Any] = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths | 86 |
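# The beam ranking in generate_beam length-normalizes cumulative log-probs
# before topk; a toy illustration of that broadcasting step (numbers made up):
import torch

_scores_sum = torch.tensor([[-2.0], [-6.0]])  # cumulative log-probs, one row per beam
_seq_lengths = torch.tensor([2.0, 4.0])       # generated tokens per beam
print(_scores_sum / _seq_lengths[:, None])    # [[-1.0], [-1.5]] -> beam 0 ranks higher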
"""simple docstring"""
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1):
    if hi < 0:
        hi = len(sorted_collection)
    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid
    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1):
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item):
    left = 0
    right = len(sorted_collection) - 1
    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item):
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right):
    if right < left:
        return None
    midpoint = left + (right - left) // 2
    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
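# Standalone demo of the helpers (separate from the interactive __main__ above):
_data = [0, 5, 7, 10, 15]
insort_left(_data, 6)
print(_data)                             # [0, 5, 6, 7, 10, 15]
print(binary_search_std_lib(_data, 10))  # 4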
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_transfo_xl""": ["""TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """TransfoXLConfig"""],
"""tokenization_transfo_xl""": ["""TransfoXLCorpus""", """TransfoXLTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_transfo_xl"] = [
"""TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""AdaptiveEmbedding""",
"""TransfoXLForSequenceClassification""",
"""TransfoXLLMHeadModel""",
"""TransfoXLModel""",
"""TransfoXLPreTrainedModel""",
"""load_tf_weights_in_transfo_xl""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_transfo_xl"] = [
"""TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFAdaptiveEmbedding""",
"""TFTransfoXLForSequenceClassification""",
"""TFTransfoXLLMHeadModel""",
"""TFTransfoXLMainLayer""",
"""TFTransfoXLModel""",
"""TFTransfoXLPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
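# Generic, minimal illustration of the lazy-import idea behind _LazyModule
# (this is not the Hugging Face implementation): the wrapped module is only
# imported on first attribute access.
import importlib


class _LazyNamespace:
    def __init__(self, module_name):
        self._module_name = module_name
        self._module = None

    def __getattr__(self, name):
        if self._module is None:
            self._module = importlib.import_module(self._module_name)
        return getattr(self._module, name)


_lazy_json = _LazyNamespace("json")  # "json" stands in for a heavy submodule
print(_lazy_json.dumps({"lazy": True}))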
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
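# The PT<->TF cross-loading exercised throughout these tests, as a standalone
# sketch (checkpoint name is only an example; both weight formats exist on the Hub):
from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)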
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = tempfile.mkdtemp()
# fmt: off
__lowerCAmelCase : List[Any] = ['', 'l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
__lowerCAmelCase : Tuple = dict(zip(_SCREAMING_SNAKE_CASE , range(len(_SCREAMING_SNAKE_CASE ) ) ) )
__lowerCAmelCase : Dict = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
__lowerCAmelCase : str = {'unk_token': '<unk>'}
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
__lowerCAmelCase : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
fp.write(json.dumps(_SCREAMING_SNAKE_CASE ) + '\n' )
with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
fp.write('\n'.join(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Optional[int] = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.4814_5466, 0.457_8275, 0.4082_1073],
'image_std': [0.2686_2954, 0.2613_0258, 0.2757_7711],
}
__lowerCAmelCase : Any = os.path.join(self.tmpdirname , _SCREAMING_SNAKE_CASE )
with open(self.image_processor_file , 'w' , encoding='utf-8' ) as fp:
json.dump(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , pad_token='!' , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , pad_token='!' , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
return OwlViTImageProcessor.from_pretrained(self.tmpdirname , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
shutil.rmtree(self.tmpdirname )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
__lowerCAmelCase : str = [Image.fromarray(np.moveaxis(_SCREAMING_SNAKE_CASE , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.get_tokenizer()
__lowerCAmelCase : List[Any] = self.get_rust_tokenizer()
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_slow.save_pretrained(self.tmpdirname )
__lowerCAmelCase : Dict = OwlViTProcessor.from_pretrained(self.tmpdirname , use_fast=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
processor_fast.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[str] = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , _SCREAMING_SNAKE_CASE )
self.assertIsInstance(processor_fast.image_processor , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = OwlViTProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__lowerCAmelCase : List[str] = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
__lowerCAmelCase : int = self.get_image_processor(do_normalize=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=_SCREAMING_SNAKE_CASE )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , _SCREAMING_SNAKE_CASE )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.get_image_processor()
__lowerCAmelCase : List[Any] = self.get_tokenizer()
__lowerCAmelCase : Optional[Any] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.prepare_image_inputs()
__lowerCAmelCase : str = image_processor(_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : List[Any] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1E-2 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.get_image_processor()
__lowerCAmelCase : int = self.get_tokenizer()
__lowerCAmelCase : str = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = 'lower newer'
__lowerCAmelCase : Optional[Any] = processor(text=_SCREAMING_SNAKE_CASE , return_tensors='np' )
__lowerCAmelCase : int = tokenizer(_SCREAMING_SNAKE_CASE , return_tensors='np' )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() , encoded_processor[key][0].tolist() )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.get_image_processor()
__lowerCAmelCase : Union[str, Any] = self.get_tokenizer()
__lowerCAmelCase : Any = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = 'lower newer'
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : str = processor(text=_SCREAMING_SNAKE_CASE , images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'google/owlvit-base-patch32'
__lowerCAmelCase : Optional[int] = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = ['cat', 'nasa badge']
__lowerCAmelCase : int = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = 16
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = 'google/owlvit-base-patch32'
__lowerCAmelCase : List[Any] = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = [['cat', 'nasa badge'], ['person']]
__lowerCAmelCase : Union[str, Any] = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = 16
__lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = max([len(_SCREAMING_SNAKE_CASE ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = 'google/owlvit-base-patch32'
__lowerCAmelCase : Tuple = OwlViTProcessor.from_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = ['cat', 'nasa badge']
__lowerCAmelCase : Optional[int] = processor(text=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = 16
__lowerCAmelCase : Union[str, Any] = inputs['input_ids']
__lowerCAmelCase : Union[str, Any] = [
[4_94_06, 23_68, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_94_06, 68_41, 1_13_01, 4_94_07, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'attention_mask'] )
self.assertEqual(inputs['input_ids'].shape , (2, seq_length) )
self.assertListEqual(list(input_ids[0] ) , predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) , predicted_ids[1] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Any = self.get_tokenizer()
__lowerCAmelCase : Union[str, Any] = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self.prepare_image_inputs()
__lowerCAmelCase : List[Any] = self.prepare_image_inputs()
__lowerCAmelCase : int = processor(images=_SCREAMING_SNAKE_CASE , query_images=_SCREAMING_SNAKE_CASE )
self.assertListEqual(list(inputs.keys() ) , ['query_pixel_values', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(_SCREAMING_SNAKE_CASE ):
processor()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.get_image_processor()
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Any = OwlViTProcessor(tokenizer=_SCREAMING_SNAKE_CASE , image_processor=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__lowerCAmelCase : List[str] = processor.batch_decode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
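# End-to-end sketch of the processor for zero-shot detection (mirrors the
# batched-text test above; the model forward pass is omitted and the image URL
# is an example):
import requests
from PIL import Image
from transformers import OwlViTProcessor

_processor = OwlViTProcessor.from_pretrained("google/owlvit-base-patch32")
_image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
_inputs = _processor(text=[["a photo of a cat", "a photo of a dog"]], images=_image, return_tensors="pt")
print(_inputs["input_ids"].shape)     # (2, 16): one row per text query
print(_inputs["pixel_values"].shape)  # (1, 3, 768, 768)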
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
        >>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type as `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 5_12 , _SCREAMING_SNAKE_CASE = 1_00 , _SCREAMING_SNAKE_CASE = 4.0 , _SCREAMING_SNAKE_CASE = 1 , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = "pil" , _SCREAMING_SNAKE_CASE = True , ):
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 1
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = len(_SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(_SCREAMING_SNAKE_CASE )}" )
__lowerCAmelCase : Dict = self._execution_device
__lowerCAmelCase : Optional[Any] = batch_size * num_images_per_prompt
__lowerCAmelCase : Optional[int] = guidance_scale > 1.0
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : int = self._encode_prompt(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = torch.cat(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : Optional[Any] = image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : int = negative_image_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Any = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
dtype=prompt_embeds.dtype , device=_SCREAMING_SNAKE_CASE )
self.scheduler.set_timesteps(_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.scheduler.timesteps
__lowerCAmelCase : int = self.unet.config.in_channels
__lowerCAmelCase , __lowerCAmelCase : Any = get_new_h_w(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.movq_scale_factor )
# create initial latent
__lowerCAmelCase : str = self.prepare_latents(
(batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , self.scheduler , )
for i, t in enumerate(self.progress_bar(_SCREAMING_SNAKE_CASE ) ):
# expand the latents if we are doing classifier free guidance
__lowerCAmelCase : List[str] = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__lowerCAmelCase : Union[str, Any] = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
__lowerCAmelCase : Optional[Any] = self.unet(
sample=_SCREAMING_SNAKE_CASE , timestep=_SCREAMING_SNAKE_CASE , encoder_hidden_states=_SCREAMING_SNAKE_CASE , added_cond_kwargs=_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , )[0]
if do_classifier_free_guidance:
__lowerCAmelCase , __lowerCAmelCase : Dict = noise_pred.split(latents.shape[1] , dim=1 )
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = noise_pred.chunk(2 )
__lowerCAmelCase , __lowerCAmelCase : int = variance_pred.chunk(2 )
__lowerCAmelCase : Dict = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
__lowerCAmelCase : Any = torch.cat([noise_pred, variance_pred_text] , dim=1 )
if not (
hasattr(self.scheduler.config , 'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = noise_pred.split(latents.shape[1] , dim=1 )
# compute the previous noisy sample x_t -> x_t-1
__lowerCAmelCase : List[str] = self.scheduler.step(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , ).prev_sample
# post-processing
__lowerCAmelCase : Tuple = self.movq.decode(_SCREAMING_SNAKE_CASE , force_not_quantize=_SCREAMING_SNAKE_CASE )['sample']
if output_type not in ["pt", "np", "pil"]:
raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported, not output_type={output_type}" )
if output_type in ["np", "pil"]:
__lowerCAmelCase : List[str] = image * 0.5 + 0.5
__lowerCAmelCase : Dict = image.clamp(0 , 1 )
__lowerCAmelCase : Dict = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
if output_type == "pil":
__lowerCAmelCase : Union[str, Any] = self.numpy_to_pil(_SCREAMING_SNAKE_CASE )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_SCREAMING_SNAKE_CASE ) | 86 | 1 |
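# Two small deobfuscated sketches of logic from the pipeline above; the names
# round_to_latent_grid and apply_cfg are ours, not the library's. The first
# mirrors the get_new_h_w helper (round the requested size up to a whole
# latent grid); the second mirrors the classifier-free-guidance line
# noise_uncond + guidance_scale * (noise_text - noise_uncond).
import torch
def round_to_latent_grid(h: int, w: int, scale_factor: int = 8):
    new_h = h // scale_factor**2 + (h % scale_factor**2 != 0)  # round up
    new_w = w // scale_factor**2 + (w % scale_factor**2 != 0)
    return new_h * scale_factor, new_w * scale_factor
def apply_cfg(noise_pred: torch.Tensor, guidance_scale: float) -> torch.Tensor:
    uncond, text = noise_pred.chunk(2)  # the batch holds [unconditional, conditional]
    return uncond + guidance_scale * (text - uncond)
assert round_to_latent_grid(768, 768) == (96, 96)
assert torch.allclose(apply_cfg(torch.stack([torch.zeros(3), torch.ones(3)]), 4.0), 4.0 * torch.ones(1, 3))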
"""simple docstring"""
import argparse
import os
import re
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_dummies.py
lowerCamelCase__ = """src/diffusers"""
# Matches is_xxx_available()
lowerCamelCase__ = re.compile(R"""is\_([a-z_]*)_available\(\)""")
# Matches from xxx import bla
lowerCamelCase__ = re.compile(R"""\s+from\s+\S*\s+import\s+([^\(\s].*)\n""")
lowerCamelCase__ = """
{0} = None
"""
lowerCamelCase__ = """
class {0}(metaclass=DummyObject):
_backends = {1}
def __init__(self, *args, **kwargs):
requires_backends(self, {1})
@classmethod
def from_config(cls, *args, **kwargs):
requires_backends(cls, {1})
@classmethod
def from_pretrained(cls, *args, **kwargs):
requires_backends(cls, {1})
"""
lowerCamelCase__ = """
def {0}(*args, **kwargs):
requires_backends({0}, {1})
"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : List[str] = _re_backend.findall(_UpperCamelCase )
if len(_UpperCamelCase ) == 0:
return None
return "_and_".join(_UpperCamelCase )
def __lowerCAmelCase ():
with open(os.path.join(_UpperCamelCase , '__init__.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Union[str, Any] = f.readlines()
# Get to the point we do the actual imports for type checking
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : Dict = {}
# Go through the end of the file
while line_index < len(_UpperCamelCase ):
# If the line contains is_backend_available, we grab all objects associated with the `else` block
__lowerCAmelCase : Tuple = find_backend(lines[line_index] )
if backend is not None:
while not lines[line_index].startswith('else:' ):
line_index += 1
line_index += 1
__lowerCAmelCase : Any = []
# Until we unindent, add backend objects to the list
while line_index < len(_UpperCamelCase ) and len(lines[line_index] ) > 1:
__lowerCAmelCase : Optional[int] = lines[line_index]
__lowerCAmelCase : Optional[int] = _re_single_line_import.search(_UpperCamelCase )
if single_line_import_search is not None:
objects.extend(single_line_import_search.groups()[0].split(', ' ) )
elif line.startswith(' ' * 8 ):
objects.append(line[8:-2] )
line_index += 1
if len(_UpperCamelCase ) > 0:
__lowerCAmelCase : List[Any] = objects
else:
line_index += 1
return backend_specific_objects
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
if name.isupper():
return DUMMY_CONSTANT.format(_UpperCamelCase )
elif name.islower():
return DUMMY_FUNCTION.format(_UpperCamelCase , _UpperCamelCase )
else:
return DUMMY_CLASS.format(_UpperCamelCase , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase=None ):
if backend_specific_objects is None:
__lowerCAmelCase : Dict = read_init()
# Special-case mapping from each backend name to the module name expected by the requires_backends helper
__lowerCAmelCase : Any = {}
for backend, objects in backend_specific_objects.items():
__lowerCAmelCase : List[Any] = '[' + ', '.join(F"\"{b}\"" for b in backend.split('_and_' ) ) + ']'
__lowerCAmelCase : Optional[int] = '# This file is autogenerated by the command `make fix-copies`, do not edit.\n'
dummy_file += "from ..utils import DummyObject, requires_backends\n\n"
dummy_file += "\n".join([create_dummy_object(_UpperCamelCase , _UpperCamelCase ) for o in objects] )
__lowerCAmelCase : Optional[Any] = dummy_file
return dummy_files
def __lowerCAmelCase (_UpperCamelCase=False ):
__lowerCAmelCase : str = create_dummy_files()
# Special-case mapping from each backend name to the short name used in utils/dummy_xxx_objects.py
__lowerCAmelCase : Optional[Any] = {'torch': 'pt'}
# Locate actual dummy modules and read their content.
__lowerCAmelCase : Dict = os.path.join(_UpperCamelCase , 'utils' )
__lowerCAmelCase : Dict = {
backend: os.path.join(_UpperCamelCase , F"dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py" )
for backend in dummy_files.keys()
}
__lowerCAmelCase : str = {}
for backend, file_path in dummy_file_paths.items():
if os.path.isfile(_UpperCamelCase ):
with open(_UpperCamelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__lowerCAmelCase : Union[str, Any] = f.read()
else:
__lowerCAmelCase : List[str] = ''
for backend in dummy_files.keys():
if dummy_files[backend] != actual_dummies[backend]:
if overwrite:
print(
F"Updating diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py as the main "
'__init__ has new objects.' )
with open(dummy_file_paths[backend] , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(dummy_files[backend] )
else:
raise ValueError(
'The main __init__ has objects that are not present in '
F"diffusers.utils.dummy_{short_names.get(_UpperCamelCase , _UpperCamelCase )}_objects.py. Run `make fix-copies` "
'to fix this.' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
lowerCamelCase__ = parser.parse_args()
check_dummies(args.fix_and_overwrite) | 86 |
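# A minimal sketch of the backend-detection step above: the regex and the
# "_and_" joining mirror the script, while the demo assertion is ours.
import re
from typing import Optional
_re_backend = re.compile(r"is\_([a-z_]*)_available\(\)")
def find_backend(line: str) -> Optional[str]:
    matches = _re_backend.findall(line)  # e.g. ["torch", "flax"]
    return "_and_".join(matches) if matches else None
assert find_backend("if not (is_torch_available() and is_flax_available()):") == "torch_and_flax"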
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = BarthezTokenizer
A_ : Tuple = BarthezTokenizerFast
A_ : Dict = True
A_ : List[str] = True
def __lowerCamelCase ( self ):
super().setUp()
__lowerCAmelCase : str = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = tokenizer
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = '<pad>'
__lowerCAmelCase : Union[str, Any] = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__lowerCAmelCase : Optional[Any] = [0, 57, 30_18, 7_03_07, 91, 2]
__lowerCAmelCase : Optional[int] = self.tokenizer(
_SCREAMING_SNAKE_CASE , max_length=len(_SCREAMING_SNAKE_CASE ) , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
__lowerCAmelCase : List[str] = batch.input_ids.tolist()[0]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
__lowerCAmelCase : Tuple = self.get_tokenizer()
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[str] = 'I was born in 92000, and this is falsé.'
__lowerCAmelCase : Optional[int] = tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = rust_tokenizer.tokenize(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self.get_rust_tokenizer()
__lowerCAmelCase : List[Any] = tokenizer.encode(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = rust_tokenizer.encode(_SCREAMING_SNAKE_CASE )
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , ) | 86 | 1 |
"""simple docstring"""
import os
import socket
from contextlib import contextmanager
import torch
from ..commands.config.default import write_basic_config # noqa: F401
from ..state import PartialState
from .dataclasses import DistributedType
from .imports import is_deepspeed_available, is_tpu_available
from .transformer_engine import convert_model
from .versions import is_torch_version
if is_deepspeed_available():
from deepspeed import DeepSpeedEngine
if is_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
def __lowerCAmelCase (_UpperCamelCase ):
if is_torch_version('<' , '2.0.0' ) or not hasattr(_UpperCamelCase , '_dynamo' ):
return False
return isinstance(_UpperCamelCase , torch._dynamo.eval_frame.OptimizedModule )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase = True ):
__lowerCAmelCase : List[str] = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
__lowerCAmelCase : str = is_compiled_module(_UpperCamelCase )
if is_compiled:
__lowerCAmelCase : List[str] = model
__lowerCAmelCase : Optional[int] = model._orig_mod
if is_deepspeed_available():
options += (DeepSpeedEngine,)
while isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = model.module
if not keep_fpaa_wrapper:
__lowerCAmelCase : Optional[Any] = getattr(_UpperCamelCase , 'forward' )
__lowerCAmelCase : List[str] = model.__dict__.pop('_original_forward' , _UpperCamelCase )
if original_forward is not None:
while hasattr(_UpperCamelCase , '__wrapped__' ):
__lowerCAmelCase : List[str] = forward.__wrapped__
if forward == original_forward:
break
__lowerCAmelCase : Dict = forward
if getattr(_UpperCamelCase , '_converted_to_transformer_engine' , _UpperCamelCase ):
convert_model(_UpperCamelCase , to_transformer_engine=_UpperCamelCase )
if is_compiled:
__lowerCAmelCase : Dict = model
__lowerCAmelCase : Tuple = compiled_model
return model
def __lowerCAmelCase ():
PartialState().wait_for_everyone()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
if PartialState().distributed_type == DistributedType.TPU:
xm.save(_UpperCamelCase , _UpperCamelCase )
elif PartialState().local_process_index == 0:
torch.save(_UpperCamelCase , _UpperCamelCase )
@contextmanager
def __lowerCAmelCase (**_UpperCamelCase ):
for key, value in kwargs.items():
__lowerCAmelCase : List[str] = str(_UpperCamelCase )
yield
for key in kwargs:
if key.upper() in os.environ:
del os.environ[key.upper()]
def __lowerCAmelCase (_UpperCamelCase ):
if not hasattr(_UpperCamelCase , '__qualname__' ) and not hasattr(_UpperCamelCase , '__name__' ):
__lowerCAmelCase : Optional[Any] = getattr(_UpperCamelCase , '__class__' , _UpperCamelCase )
if hasattr(_UpperCamelCase , '__qualname__' ):
return obj.__qualname__
if hasattr(_UpperCamelCase , '__name__' ):
return obj.__name__
return str(_UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
for key, value in source.items():
if isinstance(_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = destination.setdefault(_UpperCamelCase , {} )
merge_dicts(_UpperCamelCase , _UpperCamelCase )
else:
__lowerCAmelCase : Optional[Any] = value
return destination
def __lowerCAmelCase (_UpperCamelCase = None ):
if port is None:
__lowerCAmelCase : Tuple = 2_9500
with socket.socket(socket.AF_INET , socket.SOCK_STREAM ) as s:
return s.connect_ex(('localhost', port) ) == 0 | 86 |
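# A minimal sketch of the model-unwrapping loop above, covering only the two
# torch wrapper types; the real helper also handles DeepSpeed engines and
# torch.compile modules. The name unwrap is ours.
import torch
def unwrap(model: torch.nn.Module) -> torch.nn.Module:
    wrappers = (torch.nn.parallel.DistributedDataParallel, torch.nn.DataParallel)
    while isinstance(model, wrappers):
        model = model.module  # peel one wrapper layer at a time
    return model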
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3 | 86 | 1 |
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
if not head:
return True
# split the list into two parts (fast/slow pointers find the middle)
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = head.next, head
while fast and fast.next:
__lowerCAmelCase : Any = fast.next.next
__lowerCAmelCase : Any = slow.next
__lowerCAmelCase : Optional[Any] = slow.next
__lowerCAmelCase : Tuple = None # cut the first half off here; the comparison below still terminates even if this is skipped
# reverse the second part
__lowerCAmelCase : Union[str, Any] = None
while second:
__lowerCAmelCase : List[Any] = second.next
__lowerCAmelCase : Optional[Any] = node
__lowerCAmelCase : List[str] = second
__lowerCAmelCase : int = nxt
# compare two parts
# the second part has the same number of nodes, or one fewer
while node:
if node.val != head.val:
return False
__lowerCAmelCase : Any = node.next
__lowerCAmelCase : Any = head.next
return True
def __lowerCAmelCase (_UpperCamelCase ):
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
__lowerCAmelCase = __lowerCAmelCase = __lowerCAmelCase = head # slow = fast = cur = head
while fast and fast.next:
__lowerCAmelCase , __lowerCAmelCase : List[str] = fast.next.next, slow.next
# 2. Push the second half into the stack
__lowerCAmelCase : Tuple = [slow.val]
while slow.next:
__lowerCAmelCase : Dict = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
__lowerCAmelCase : Tuple = cur.next
return True
def __lowerCAmelCase (_UpperCamelCase ):
if not head or not head.next:
return True
__lowerCAmelCase : Tuple = {}
__lowerCAmelCase : Optional[Any] = 0
while head:
if head.val in d:
d[head.val].append(_UpperCamelCase )
else:
__lowerCAmelCase : Dict = [pos]
__lowerCAmelCase : int = head.next
pos += 1
__lowerCAmelCase : Union[str, Any] = pos - 1
__lowerCAmelCase : List[Any] = 0
for v in d.values():
if len(_UpperCamelCase ) % 2 != 0:
middle += 1
else:
__lowerCAmelCase : List[str] = 0
for i in range(0 , len(_UpperCamelCase ) ):
if v[i] + v[len(_UpperCamelCase ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True | 86 |
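# A clean, runnable rendering of the first (fast/slow pointer) variant above,
# since the obfuscation collapsed its variable names; Node is our own minimal cell.
class Node:
    def __init__(self, val, nxt=None):
        self.val, self.next = val, nxt
def is_palindrome_clean(head):
    if not head:
        return True
    fast, slow = head.next, head  # slow stops at the end of the first half
    while fast and fast.next:
        fast = fast.next.next
        slow = slow.next
    second = slow.next
    slow.next = None  # cut the list in two
    prev = None  # reverse the second half
    while second:
        second.next, prev, second = prev, second, second.next
    while prev:  # the second half has equal length or one node fewer
        if prev.val != head.val:
            return False
        prev, head = prev.next, head.next
    return True
assert is_palindrome_clean(Node(1, Node(2, Node(2, Node(1)))))
assert not is_palindrome_clean(Node(1, Node(2, Node(3))))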
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, TransformeraDModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : Union[str, Any] = DiTPipeline
A_ : Union[str, Any] = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
A_ : List[Any] = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
A_ : Optional[Any] = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : List[str] = TransformeraDModel(
sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=_SCREAMING_SNAKE_CASE , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = AutoencoderKL()
__lowerCAmelCase : Union[str, Any] = DDIMScheduler()
__lowerCAmelCase : Dict = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : List[str] = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : List[str] = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = 'cpu'
__lowerCAmelCase : Any = self.get_dummy_components()
__lowerCAmelCase : Union[str, Any] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE ).images
__lowerCAmelCase : List[str] = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
__lowerCAmelCase : Optional[int] = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
__lowerCAmelCase : List[Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(_SCREAMING_SNAKE_CASE , 1E-3 )
def __lowerCamelCase ( self ):
self._test_inference_batch_single_identical(relax_max_difference=_SCREAMING_SNAKE_CASE , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = torch.manual_seed(0 )
__lowerCAmelCase : int = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
__lowerCAmelCase : Optional[Any] = ['vase', 'umbrella', 'white shark', 'white wolf']
__lowerCAmelCase : Optional[Any] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
__lowerCAmelCase : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
__lowerCAmelCase : Dict = ['vase', 'umbrella']
__lowerCAmelCase : List[str] = pipe.get_label_ids(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = pipe(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
assert np.abs((expected_image - image).max() ) < 1E-1 | 86 | 1 |
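# Both tests above follow the same numeric-regression pattern: slice a small
# corner of the generated image and compare it against stored reference
# values. A self-contained illustration with synthetic data:
import numpy as np
image = np.zeros((1, 16, 16, 3))
image_slice = image[0, -3:, -3:, -1]  # bottom-right 3x3 patch of the last channel
expected_slice = np.zeros(9)
max_diff = np.abs(image_slice.flatten() - expected_slice).max()
assert max_diff <= 1e-3  # same tolerance as the fast test above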
"""simple docstring"""
from collections.abc import Iterable
from typing import Generic, TypeVar
lowerCamelCase__ = TypeVar("""_T""")
class A__ ( Generic[_T]):
def __init__( self , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : list[_T] = list(iterable or [] )
__lowerCAmelCase : list[_T] = []
def __len__( self ):
return len(self._stacka ) + len(self._stacka )
def __repr__( self ):
return f"Queue({tuple(self._stacka[::-1] + self._stacka )})"
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
self._stacka.append(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self._stacka.pop
__lowerCAmelCase : List[Any] = self._stacka.append
if not self._stacka:
while self._stacka:
stacka_append(stacka_pop() )
if not self._stacka:
raise IndexError('Queue is empty' )
return self._stacka.pop()
if __name__ == "__main__":
from doctest import testmod
testmod() | 86 |
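# A compact restatement of the two-stack queue above: pushes land in an inbox,
# and pops drain the inbox into an outbox only when the outbox is empty, so
# each element moves at most twice (amortized O(1) per operation).
class TwoStackQueue:
    def __init__(self):
        self._in, self._out = [], []
    def put(self, item):
        self._in.append(item)
    def get(self):
        if not self._out:
            while self._in:
                self._out.append(self._in.pop())
        if not self._out:
            raise IndexError("Queue is empty")
        return self._out.pop()
q = TwoStackQueue()
for x in (1, 2, 3):
    q.put(x)
assert [q.get(), q.get(), q.get()] == [1, 2, 3]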
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 | 1 |
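# The num_images_per_prompt test above asserts that the output count equals
# batch_size * num_images_per_prompt; pipelines implement this duplication
# with repeat_interleave. A toy check of that bookkeeping (values made up):
import torch
emb = torch.tensor([[1.0], [2.0]])     # batch_size = 2
rep = emb.repeat_interleave(3, dim=0)  # num_images_per_prompt = 3
assert rep.shape[0] == 2 * 3
assert rep[:3].eq(1.0).all() and rep[3:].eq(2.0).all()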
"""simple docstring"""
from __future__ import annotations
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = order
# a_{0} ... a_{k}
__lowerCAmelCase : Optional[int] = [1.0] + [0.0] * order
# b_{0} ... b_{k}
__lowerCAmelCase : Dict = [1.0] + [0.0] * order
# x[n-1] ... x[n-k]
__lowerCAmelCase : Union[str, Any] = [0.0] * self.order
# y[n-1] ... y[n-k]
__lowerCAmelCase : List[Any] = [0.0] * self.order
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if len(_SCREAMING_SNAKE_CASE ) < self.order:
__lowerCAmelCase : int = [1.0, *a_coeffs]
if len(_SCREAMING_SNAKE_CASE ) != self.order + 1:
__lowerCAmelCase : Any = (
f"Expected a_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(_SCREAMING_SNAKE_CASE )}"
)
raise ValueError(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) != self.order + 1:
__lowerCAmelCase : int = (
f"Expected b_coeffs to have {self.order + 1} elements "
f"for {self.order}-order filter, got {len(_SCREAMING_SNAKE_CASE )}"
)
raise ValueError(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = a_coeffs
__lowerCAmelCase : Optional[int] = b_coeffs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = 0.0
# Start at index 1 and do index 0 at the end.
for i in range(1 , self.order + 1 ):
result += (
self.b_coeffs[i] * self.input_history[i - 1]
- self.a_coeffs[i] * self.output_history[i - 1]
)
__lowerCAmelCase : Union[str, Any] = (result + self.b_coeffs[0] * sample) / self.a_coeffs[0]
__lowerCAmelCase : str = self.input_history[:-1]
__lowerCAmelCase : int = self.output_history[:-1]
__lowerCAmelCase : List[Any] = sample
__lowerCAmelCase : str = result
return result | 86 |
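# The class above implements the standard IIR difference equation
#   y[n] = (b0*x[n] + sum_i(b_i*x[n-i]) - sum_i(a_i*y[n-i])) / a0.
# A single-step sketch with deobfuscated names; process_sample is our name.
def process_sample(sample, a, b, x_hist, y_hist):
    """One filter step; x_hist/y_hist hold previous inputs/outputs, newest first."""
    acc = 0.0
    for i in range(1, len(a)):
        acc += b[i] * x_hist[i - 1] - a[i] * y_hist[i - 1]
    y = (acc + b[0] * sample) / a[0]
    x_hist[:] = [sample] + x_hist[:-1]  # shift histories, newest in front
    y_hist[:] = [y] + y_hist[:-1]
    return y
# an order-1 identity filter (a = b = [1, 0]) passes the signal through unchanged
xh, yh = [0.0], [0.0]
assert process_sample(0.5, [1.0, 0.0], [1.0, 0.0], xh, yh) == 0.5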
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
lowerCamelCase__ = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 | 1 |
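# The __init__ above uses the lazy-import pattern: submodules are loaded only
# on first attribute access. A simplified stand-in for that machinery (our own
# LazyModule, not the transformers implementation):
import importlib
import types
class LazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # map each exported symbol to the submodule that defines it
        self._symbol_to_module = {s: m for m, syms in import_structure.items() for s in syms}
    def __getattr__(self, name):
        module_name = self._symbol_to_module.get(name)
        if module_name is None:
            raise AttributeError(name)
        value = getattr(importlib.import_module(module_name), name)
        setattr(self, name, value)  # cache so later lookups skip __getattr__
        return value
demo = LazyModule("demo", {"math": ["sqrt"], "json": ["dumps"]})
assert demo.sqrt(9.0) == 3.0  # math is imported only at this point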
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
if n == 1 or not isinstance(_UpperCamelCase , _UpperCamelCase ):
return 0
elif n == 2:
return 1
else:
__lowerCAmelCase : Optional[int] = [0, 1]
for i in range(2 , n + 1 ):
sequence.append(sequence[i - 1] + sequence[i - 2] )
return sequence[n]
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Dict = 0
__lowerCAmelCase : Tuple = 2
while digits < n:
index += 1
__lowerCAmelCase : List[str] = len(str(fibonacci(_UpperCamelCase ) ) )
return index
def __lowerCAmelCase (_UpperCamelCase = 1000 ):
return fibonacci_digits_index(_UpperCamelCase )
if __name__ == "__main__":
print(solution(int(str(input()).strip()))) | 86 |
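# The sample above (Project Euler 25) recomputes the whole Fibonacci sequence
# for every probe; carrying just the last two values makes it linear in the
# answer. A compact sketch (first_fib_index_with_digits is our name):
def first_fib_index_with_digits(n: int) -> int:
    if n < 1:
        raise ValueError("n must be a positive integer")
    a, b, index = 0, 1, 1  # b == F(1)
    while len(str(b)) < n:
        a, b = b, a + b
        index += 1
    return index
assert first_fib_index_with_digits(3) == 12  # F(12) = 144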
"""simple docstring"""
import math
import sys
def __lowerCAmelCase (_UpperCamelCase ):
if number != int(_UpperCamelCase ):
raise ValueError('the value of input must be a natural number' )
if number < 0:
raise ValueError('the value of input must not be a negative number' )
if number == 0:
return 1
__lowerCAmelCase : Any = [-1] * (number + 1)
__lowerCAmelCase : List[Any] = 0
for i in range(1 , number + 1 ):
__lowerCAmelCase : List[Any] = sys.maxsize
__lowerCAmelCase : Optional[int] = int(math.sqrt(_UpperCamelCase ) )
for j in range(1 , root + 1 ):
__lowerCAmelCase : Optional[Any] = 1 + answers[i - (j**2)]
__lowerCAmelCase : Any = min(_UpperCamelCase , _UpperCamelCase )
__lowerCAmelCase : List[str] = answer
return answers[number]
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
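# The sample above is the classic "fewest perfect squares summing to n" DP:
# answers[i] = 1 + min over j*j <= i of answers[i - j*j]. A compact bottom-up
# sketch using math.isqrt:
import math
def min_perfect_squares(n: int) -> int:
    best = [0] * (n + 1)
    for i in range(1, n + 1):
        best[i] = 1 + min(best[i - j * j] for j in range(1, math.isqrt(i) + 1))
    return best[n]
assert min_perfect_squares(12) == 3  # 4 + 4 + 4
assert min_perfect_squares(13) == 2  # 4 + 9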
"""simple docstring"""
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.models import Sequential
if __name__ == "__main__":
lowerCamelCase__ = pd.read_csv("""sample_data.csv""", header=None)
lowerCamelCase__ = df.shape[:1][0]
# If you're using some other dataset, select its target column here
lowerCamelCase__ = df.iloc[:, 1:2]
lowerCamelCase__ = actual_data.values.reshape(len_data, 1)
lowerCamelCase__ = MinMaxScaler().fit_transform(actual_data)
lowerCamelCase__ = 10
lowerCamelCase__ = 5
lowerCamelCase__ = 20
lowerCamelCase__ = len_data - periods * look_back
lowerCamelCase__ = actual_data[:division]
lowerCamelCase__ = actual_data[division - look_back :]
lowerCamelCase__ , lowerCamelCase__ = [], []
lowerCamelCase__ , lowerCamelCase__ = [], []
for i in range(0, len(train_data) - forward_days - look_back + 1):
train_x.append(train_data[i : i + look_back])
train_y.append(train_data[i + look_back : i + look_back + forward_days])
for i in range(0, len(test_data) - forward_days - look_back + 1):
test_x.append(test_data[i : i + look_back])
test_y.append(test_data[i + look_back : i + look_back + forward_days])
lowerCamelCase__ = np.array(train_x)
lowerCamelCase__ = np.array(test_x)
lowerCamelCase__ = np.array([list(i.ravel()) for i in train_y])
lowerCamelCase__ = np.array([list(i.ravel()) for i in test_y])
lowerCamelCase__ = Sequential()
model.add(LSTM(128, input_shape=(look_back, 1), return_sequences=True))
model.add(LSTM(64, input_shape=(128, 1)))
model.add(Dense(forward_days))
model.compile(loss="""mean_squared_error""", optimizer="""adam""")
lowerCamelCase__ = model.fit(
x_train, y_train, epochs=150, verbose=1, shuffle=True, batch_size=4
)
lowerCamelCase__ = model.predict(x_test) | 86 |
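# The script above builds supervised pairs with a sliding window (look_back
# inputs predict forward_days targets). A standalone sketch of just that
# windowing step, with made-up sizes:
import numpy as np
def make_windows(series, look_back, forward_days):
    xs, ys = [], []
    for i in range(len(series) - look_back - forward_days + 1):
        xs.append(series[i : i + look_back])
        ys.append(series[i + look_back : i + look_back + forward_days])
    return np.array(xs), np.array(ys)
x, y = make_windows(np.arange(10.0), look_back=4, forward_days=2)
assert x.shape == (5, 4) and y.shape == (5, 2)
assert (y[0] == [4.0, 5.0]).all()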
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class FlaxGPTJModelTest(FlaxModelTesterMixin, FlaxGenerationTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
    all_generative_model_classes = (FlaxGPTJForCausalLM,) if is_flax_available() else ()

    def setUp(self):
        self.model_tester = FlaxGPTJModelTester(self)

    def test_use_cache_forward(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward(model_class_name, config, input_ids, attention_mask)

    def test_use_cache_forward_with_attn_mask(self):
        for model_class_name in self.all_model_classes:
            config, input_ids, attention_mask = self.model_tester.prepare_config_and_inputs()
            self.model_tester.check_use_cache_forward_with_attn_mask(
                model_class_name, config, input_ids, attention_mask
            )
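
    # Slow end-to-end check: left-padded batched generation with the public
    # EleutherAI/gpt-j-6B checkpoint must reproduce the expected greedy continuations.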
    @tooslow
    def test_batch_generation(self):
        tokenizer = GPT2Tokenizer.from_pretrained('gpt2', pad_token='<|endoftext|>', padding_side='left')
        inputs = tokenizer(['Hello this is a long string', 'Hey'], return_tensors='np', padding=True, truncation=True)

        model = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B')
        model.do_sample = False
        model.config.pad_token_id = model.config.eos_token_id

        jit_generate = jax.jit(model.generate)
        output_sequences = jit_generate(
            inputs['input_ids'], attention_mask=inputs['attention_mask'], pad_token_id=tokenizer.pad_token_id
        ).sequences
        output_string = tokenizer.batch_decode(output_sequences, skip_special_tokens=True)

        expected_string = [
            'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
            'Hey, I\'m a little late to the party. I\'m going to',
        ]
        self.assertListEqual(output_string, expected_string)
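
    # Cross-framework checks: convert weights between PyTorch and Flax (in memory and
    # via save_pretrained round-trips) and require matching last-token outputs under
    # randomized padding masks.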
    @is_pt_flax_cross_test
    def test_equivalence_pt_to_flax(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)

                fx_state = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), fx_model)
                fx_model.params = fx_state

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    pt_model.save_pretrained(tmpdirname)
                    fx_model_loaded = model_class.from_pretrained(tmpdirname, from_pt=True)

                fx_outputs_loaded = fx_model_loaded(**prepared_inputs_dict).to_tuple()
                self.assertEqual(
                    len(fx_outputs_loaded), len(pt_outputs), 'Output lengths differ between Flax and PyTorch'
                )
                for fx_output_loaded, pt_output in zip(fx_outputs_loaded, pt_outputs):
                    self.assert_almost_equals(fx_output_loaded[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @is_pt_flax_cross_test
    def test_equivalence_flax_to_pt(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                # prepare inputs
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                pt_inputs = {k: torch.tensor(v.tolist()) for k, v in prepared_inputs_dict.items()}

                # load corresponding PyTorch class
                pt_model_class_name = model_class.__name__[4:]  # Skip the "Flax" at the beginning
                pt_model_class = getattr(transformers, pt_model_class_name)

                pt_model = pt_model_class(config).eval()
                fx_model = model_class(config, dtype=jnp.float32)
                pt_model = load_flax_weights_in_pytorch_model(pt_model, fx_model.params)

                batch_size, seq_length = pt_inputs['input_ids'].shape
                rnd_start_indices = np.random.randint(0, seq_length - 1, size=(batch_size,))
                for batch_idx, start_index in enumerate(rnd_start_indices):
                    pt_inputs['attention_mask'][batch_idx, :start_index] = 0
                    pt_inputs['attention_mask'][batch_idx, start_index:] = 1
                    prepared_inputs_dict['attention_mask'][batch_idx, :start_index] = 0
                    prepared_inputs_dict['attention_mask'][batch_idx, start_index:] = 1

                # make sure weights are tied in PyTorch
                pt_model.tie_weights()

                with torch.no_grad():
                    pt_outputs = pt_model(**pt_inputs).to_tuple()

                fx_outputs = fx_model(**prepared_inputs_dict).to_tuple()
                self.assertEqual(len(fx_outputs), len(pt_outputs), 'Output lengths differ between Flax and PyTorch')
                for fx_output, pt_output in zip(fx_outputs, pt_outputs):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)

                with tempfile.TemporaryDirectory() as tmpdirname:
                    fx_model.save_pretrained(tmpdirname)
                    pt_model_loaded = pt_model_class.from_pretrained(tmpdirname, from_flax=True)

                with torch.no_grad():
                    pt_outputs_loaded = pt_model_loaded(**pt_inputs).to_tuple()

                self.assertEqual(
                    len(fx_outputs), len(pt_outputs_loaded), 'Output lengths differ between Flax and PyTorch'
                )
                for fx_output, pt_output in zip(fx_outputs, pt_outputs_loaded):
                    self.assert_almost_equals(fx_output[:, -1], pt_output[:, -1].numpy(), 4e-2)
    @tooslow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained('EleutherAI/gpt-j-6B')
            outputs = model(np.ones((1, 1)))
            self.assertIsNotNone(outputs) | 86 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
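
# Lazy-import scaffold used throughout transformers: the dict below only names the
# submodules; _LazyModule at the bottom defers the actual imports until first access.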
_import_structure = {
"""configuration_mvp""": ["""MVP_PRETRAINED_CONFIG_ARCHIVE_MAP""", """MvpConfig""", """MvpOnnxConfig"""],
"""tokenization_mvp""": ["""MvpTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_mvp_fast"""] = ["""MvpTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_mvp"""] = [
"""MVP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""MvpForCausalLM""",
"""MvpForConditionalGeneration""",
"""MvpForQuestionAnswering""",
"""MvpForSequenceClassification""",
"""MvpModel""",
"""MvpPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 86 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
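

# Tester that builds a deliberately small ConvBERT config (2 layers, hidden size 384)
# and provides the create_and_check_* helpers shared by the test cases below.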
class TFConvBertModelTester:
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True, use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=2, num_attention_heads=4, intermediate_size=37, hidden_act='gelu', hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2, initializer_range=0.02, num_labels=3, num_choices=4, scope=None):
        # NOTE: mirrors the upstream tester, which hardcodes these values instead of
        # using the constructor arguments.
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None

    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': TFConvBertModel,
            'fill-mask': TFConvBertForMaskedLM,
            'question-answering': TFConvBertForQuestionAnswering,
            'text-classification': TFConvBertForSequenceClassification,
            'token-classification': TFConvBertForTokenClassification,
            'zero-shot': TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, 'use_cache'):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, 'saved_model', '1')
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, 'expected_num_hidden_layers', self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
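
    # The attention shapes above and in test_attention_outputs expect
    # num_attention_heads / 2: ConvBERT's mixed attention hands half of the heads
    # (head_ratio=2) to the span-based convolution branch, so only half show up as
    # self-attention heads.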
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        self.assertIsNotNone(model)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        decoder_seq_length = getattr(self.model_tester, 'decoder_seq_length', self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, 'encoder_seq_length', self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, 'key_length', decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, 'key_length', encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict['output_attentions']
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 86 | 1 |
"""simple docstring"""
import os
from pathlib import Path
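
# Builds the MultiScaleDeformableAttention extension on first use:
# torch.utils.cpp_extension.load JIT-compiles the C++/CUDA sources shipped under kernels/.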
def load_cuda_kernels():
    from torch.utils.cpp_extension import load

    root = Path(__file__).resolve().parent.parent.parent / 'kernels' / 'deformable_detr'
    src_files = [
        root / filename
        for filename in [
            'vision.cpp',
            os.path.join('cpu', 'ms_deform_attn_cpu.cpp'),
            os.path.join('cuda', 'ms_deform_attn_cuda.cu'),
        ]
    ]

    load(
        'MultiScaleDeformableAttention',
        src_files,
        with_cuda=True,
        extra_include_paths=[str(root)],
        extra_cflags=['-DWITH_CUDA=1'],
        extra_cuda_cflags=[
            '-DCUDA_HAS_FP16=1',
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ],
    )

    import MultiScaleDeformableAttention as MSDA

    return MSDA | 86 |
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_rescale=True, rescale_factor=1 / 255, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
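
    # Mirrors DetrImageProcessor's resizing rule (scale the short side to
    # size['shortest_edge'], keep aspect ratio) so the tests can predict output
    # shapes; batched inputs take the max height/width across the batch.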
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size['shortest_edge'] * h / w)
                expected_width = self.size['shortest_edge']
            elif w > h:
                expected_height = self.size['shortest_edge']
                expected_width = int(self.size['shortest_edge'] * w / h)
            else:
                expected_height = self.size['shortest_edge']
                expected_width = self.size['shortest_edge']
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class DetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'image_mean'))
        self.assertTrue(hasattr(image_processing, 'image_std'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
        self.assertTrue(hasattr(image_processing, 'do_rescale'))
        self.assertTrue(hasattr(image_processing, 'rescale_factor'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_pad'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'shortest_edge': 18, 'longest_edge': 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {'shortest_edge': 42, 'longest_edge': 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors='pt').pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )
    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'image_id': 39769, 'annotations': target}

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50')
        encoding = image_processing(images=image, annotations=target, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
        with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt', 'r') as f:
            target = json.loads(f.read())

        target = {'file_name': '000000039769.png', 'image_id': 39769, 'segments_info': target}
        masks_path = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic')

        # encode them
        image_processing = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic')
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors='pt')

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding['pixel_values'].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding['labels'][0]['area'], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding['labels'][0]['boxes'].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding['labels'][0]['masks'].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding['labels'][0]['size'], expected_size)) | 86 | 1 |
"""simple docstring"""
import os
import pytest
from transformers.dynamic_module_utils import get_imports
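
# Each fixture below is a source snippet exercising one import pattern that
# get_imports must classify; only the unconditional `import os` should be reported.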
TOP_LEVEL_IMPORT = """
import os
"""
IMPORT_IN_FUNCTION = """
def foo():
import os
return False
"""
DEEPLY_NESTED_IMPORT = """
def foo():
def bar():
if True:
import os
return False
return bar()
"""
TOP_LEVEL_TRY_IMPORT = """
import os
try:
import bar
except ImportError:
raise ValueError()
"""
TRY_IMPORT_IN_FUNCTION = """
import os
def foo():
try:
import bar
except ImportError:
raise ValueError()
"""
MULTIPLE_EXCEPTS_IMPORT = """
import os
try:
import bar
except (ImportError, AttributeError):
raise ValueError()
"""
EXCEPT_AS_IMPORT = """
import os
try:
import bar
except ImportError as e:
raise ValueError()
"""
GENERIC_EXCEPT_IMPORT = """
import os
try:
import bar
except:
raise ValueError()
"""
MULTILINE_TRY_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
raise ValueError()
"""
MULTILINE_BOTH_IMPORT = """
import os
try:
import bar
import baz
except ImportError:
x = 1
raise ValueError()
"""
CASES = [
TOP_LEVEL_IMPORT,
IMPORT_IN_FUNCTION,
DEEPLY_NESTED_IMPORT,
TOP_LEVEL_TRY_IMPORT,
GENERIC_EXCEPT_IMPORT,
MULTILINE_TRY_IMPORT,
MULTILINE_BOTH_IMPORT,
MULTIPLE_EXCEPTS_IMPORT,
EXCEPT_AS_IMPORT,
TRY_IMPORT_IN_FUNCTION,
]
@pytest.mark.parametrize('case', CASES)
def test_import_parsing(tmp_path, case):
    tmp_file_path = os.path.join(tmp_path, 'test_file.py')
    with open(tmp_file_path, 'w') as _tmp_file:
        _tmp_file.write(case)

    parsed_imports = get_imports(tmp_file_path)
    assert parsed_imports == ["os"] | 86 |
"""simple docstring"""
import numpy as np
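
# Element-wise sigmoid and the sigmoid linear unit (SiLU / swish): silu(x) = x * sigmoid(x).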
def sigmoid(vector: np.ndarray) -> np.ndarray:
    return 1 / (1 + np.exp(-vector))


def sigmoid_linear_unit(vector: np.ndarray) -> np.ndarray:
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) | 86 |
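# The behavior exercised above, in isolation: `output_attentions` can be
# requested per call via the inputs or baked into the config at load time
# (a hedged sketch, not part of the test suite; the checkpoint is the one used
# above, and ConvBERT's head_ratio of 2 is why the tests expect
# num_attention_heads / 2 attention heads per layer):
# from transformers import TFConvBertModel
# model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base", output_attentions=True)
# outputs = model(tf.constant([[0, 1, 2, 3]]))
# attentions = outputs.attentions  # one tensor per hidden layer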
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__lowerCAmelCase : List[str] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
            if model_class in get_values(MODEL_FOR_PRETRAINING_MAPPING ):
                inputs_dict['labels'] = torch.zeros(
                    (self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=torch_device )
                inputs_dict['next_sentence_label'] = torch.zeros(
                    self.model_tester.batch_size , dtype=torch.long , device=torch_device )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def _long_tensor(tok_lst ):
    return torch.tensor(
        tok_lst , dtype=torch.long , device=torch_device , )
TOLERANCE = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
        # MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in an
        # absolute difference of ~1, so it is not a good idea to measure with an additive tolerance.
        # Here, we instead divide the expected result by the actual result in order to obtain ~1. We then check that
        # the ratio is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
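# Standalone illustration of the ratio-bound check above (a sketch, not part
# of the test): with a tolerance of 1e-3, a tiny absolute drift on a value of
# order 1e8 still passes, whereas an additive comparison at small atol would not.
import torch

expected = torch.tensor([1.0e8, 2.0e-1])
actual = torch.tensor([1.0e8 + 50.0, 2.0e-1])  # ~5e-7 relative error on the large value
ratio = expected / actual
assert torch.all(ratio >= 1 - 1e-3) and torch.all(ratio <= 1 + 1e-3)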
"""simple docstring"""
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.txt"""}
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""openbmb/cpm-ant-10b""": """https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""openbmb/cpm-ant-10b""": 1_024,
}
def load_vocab(vocab_file ):
    vocab = collections.OrderedDict()
    with open(vocab_file , 'r' , encoding='utf-8' ) as reader:
        tokens = reader.readlines()
    for index, token in enumerate(tokens ):
        token = token.rstrip('\n' )
        vocab[token] = index
return vocab
class WordpieceTokenizer(object):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE=2_00 ):
        self.vocab = vocab
        self.unk_token = unk_token
        self.max_input_chars_per_word = max_input_chars_per_word
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = list(_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > self.max_input_chars_per_word:
return [self.unk_token]
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : List[str] = []
while start < len(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = len(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = None
while start < end:
__lowerCAmelCase : List[str] = ''.join(chars[start:end] )
if substr in self.vocab:
__lowerCAmelCase : str = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
                sub_tokens.append(cur_substr )
__lowerCAmelCase : Dict = end
return sub_tokens
class A__ ( _lowerCamelCase):
A_ : Any = VOCAB_FILES_NAMES
A_ : Any = PRETRAINED_VOCAB_FILES_MAP
A_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[int] = ['input_ids', 'attention_mask']
A_ : int = False
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="<d>" , _SCREAMING_SNAKE_CASE="</d>" , _SCREAMING_SNAKE_CASE="<s>" , _SCREAMING_SNAKE_CASE="</s>" , _SCREAMING_SNAKE_CASE="<pad>" , _SCREAMING_SNAKE_CASE="<unk>" , _SCREAMING_SNAKE_CASE="</n>" , _SCREAMING_SNAKE_CASE="</_>" , _SCREAMING_SNAKE_CASE="left" , **_SCREAMING_SNAKE_CASE , ):
requires_backends(self , ['jieba'] )
super().__init__(
bod_token=_SCREAMING_SNAKE_CASE , eod_token=_SCREAMING_SNAKE_CASE , bos_token=_SCREAMING_SNAKE_CASE , eos_token=_SCREAMING_SNAKE_CASE , pad_token=_SCREAMING_SNAKE_CASE , unk_token=_SCREAMING_SNAKE_CASE , line_token=_SCREAMING_SNAKE_CASE , space_token=_SCREAMING_SNAKE_CASE , padding_side=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
        self.bod_token = bod_token
        self.eod_token = eod_token
        self.encoder = load_vocab(_SCREAMING_SNAKE_CASE )
        self.encoder[' '] = self.encoder[space_token]
        self.encoder['\n'] = self.encoder[line_token]
        del self.encoder[space_token]
        del self.encoder[line_token]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def __lowerCamelCase ( self ):
return self.encoder[self.bod_token]
@property
def __lowerCamelCase ( self ):
return self.encoder[self.eod_token]
@property
def __lowerCamelCase ( self ):
return self.encoder["\n"]
@property
def __lowerCamelCase ( self ):
return len(self.encoder )
def __lowerCamelCase ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = []
for x in jieba.cut(_SCREAMING_SNAKE_CASE , cut_all=_SCREAMING_SNAKE_CASE ):
            output_tokens.extend(self.wordpiece_tokenizer.tokenize(x ) )
return output_tokens
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = [i for i in token_ids if i >= 0]
__lowerCAmelCase : Tuple = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return token in self.encoder
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return "".join(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.encoder.get(_SCREAMING_SNAKE_CASE , self.encoder.get(self.unk_token ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
return self.decoder.get(_SCREAMING_SNAKE_CASE , self.unk_token )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if os.path.isdir(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = os.path.join(
_SCREAMING_SNAKE_CASE , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
else:
__lowerCAmelCase : int = (filename_prefix + '-' if filename_prefix else '') + save_directory
__lowerCAmelCase : int = 0
if " " in self.encoder:
            self.encoder['</_>'] = self.encoder[' ']
del self.encoder[" "]
if "\n" in self.encoder:
            self.encoder['</n>'] = self.encoder['\n']
del self.encoder["\n"]
        self.encoder = collections.OrderedDict(sorted(self.encoder.items() , key=lambda x : x[1] ) )
with open(_SCREAMING_SNAKE_CASE , 'w' , encoding='utf-8' ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f"Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."
' Please check that the vocabulary is not corrupted!' )
__lowerCAmelCase : Optional[int] = token_index
writer.write(token + '\n' )
index += 1
return (vocab_file,)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , _SCREAMING_SNAKE_CASE = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_SCREAMING_SNAKE_CASE , token_ids_a=_SCREAMING_SNAKE_CASE , already_has_special_tokens=_SCREAMING_SNAKE_CASE )
if token_ids_a is not None:
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) + [1] + ([0] * len(_SCREAMING_SNAKE_CASE ))
return [1] + ([0] * len(_SCREAMING_SNAKE_CASE )) | 86 |
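# Minimal standalone sketch of the greedy longest-match-first loop that the
# WordpieceTokenizer above implements (hypothetical helper with a toy vocab,
# not the class's actual API):
def greedy_wordpiece(word, vocab, unk="<unk>"):
    tokens, start = [], 0
    while start < len(word):
        end = len(word)
        while end > start and word[start:end] not in vocab:
            end -= 1  # shrink the candidate until a vocab entry matches
        if end == start:  # no prefix matched at all
            tokens.append(unk)
            start += 1
        else:
            tokens.append(word[start:end])
            start = end
    return tokens

# greedy_wordpiece("unhappy", {"un", "happy"}) -> ["un", "happy"]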
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        feature_extractor = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
            feature_extractor = kwargs.pop('feature_extractor' )
        image_processor = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
        super().__init__(image_processor , tokenizer )
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        images = kwargs.pop('images' , None )
        text = kwargs.pop('text' , None )
        if len(args ) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError('You need to specify either an `images` or `text` input to process.' )
        if images is not None:
            inputs = self.image_processor(images , *args , **kwargs )
        if text is not None:
            encodings = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif images is None:
return encodings
else:
            inputs['labels'] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def token2json( self , tokens , is_inner_value=False , added_vocab=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
                    value = self.token2json(content , is_inner_value=True , added_vocab=added_vocab )
                    if value:
                        if len(value ) == 1:
                            value = value[0]
                        output[key] = value
else: # leaf nodes
                    output[key] = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
                        output[key].append(leaf )
if len(output[key] ) == 1:
                        output[key] = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
            return [output] + self.token2json(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
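# Minimal standalone sketch of the token-to-JSON parsing done by token2json
# above (hypothetical helper, simplified: no added-vocab handling or special
# leaf tokens): "<s_key>...</s_key>" spans become dict entries and "<sep/>"
# separates list items.
import re


def mini_token2json(tokens: str) -> dict:
    output = {}
    while True:
        start = re.search(r"<s_(.*?)>", tokens)
        if start is None:
            break
        key = start.group(1)
        end = re.search(rf"</s_{key}>", tokens)
        if end is None:  # unmatched opening tag: drop it and continue
            tokens = tokens.replace(start.group(), "", 1)
            continue
        content = re.search(
            rf"{re.escape(start.group())}(.*?){re.escape(end.group())}", tokens, re.DOTALL
        ).group(1)
        if "<s_" in content:  # non-leaf node: recurse
            output[key] = mini_token2json(content)
        else:  # leaf node(s)
            leaves = [leaf.strip() for leaf in content.split("<sep/>")]
            output[key] = leaves[0] if len(leaves) == 1 else leaves
        tokens = tokens[tokens.find(end.group()) + len(end.group()) :]
    return output


# mini_token2json("<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>")
# -> {"menu": {"name": "Latte", "price": "4.50"}}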
"""simple docstring"""
import torch
from torch import nn
from transformers import CLIPPreTrainedModel, CLIPVisionModel
from ...models.attention import BasicTransformerBlock
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7_68 ):
super().__init__(_SCREAMING_SNAKE_CASE )
        self.proj_size = proj_size
        self.model = CLIPVisionModel(config )
        self.mapper = PaintByExampleMapper(config )
        self.final_layer_norm = nn.LayerNorm(config.hidden_size )
        self.proj_out = nn.Linear(config.hidden_size , self.proj_size )
        # uncondition for scaling
        self.uncond_vector = nn.Parameter(torch.randn((1, 1, self.proj_size) ) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : Tuple = self.model(pixel_values=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = clip_output.pooler_output
__lowerCAmelCase : Any = self.mapper(latent_states[:, None] )
__lowerCAmelCase : Dict = self.final_layer_norm(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.proj_out(_SCREAMING_SNAKE_CASE )
if return_uncond_vector:
return latent_states, self.uncond_vector
return latent_states
class A__ ( nn.Module):
def __init__( self , _SCREAMING_SNAKE_CASE ):
super().__init__()
        num_layers = (config.num_hidden_layers + 1) // 5
        hid_size = config.hidden_size
        num_heads = 1
        self.blocks = nn.ModuleList(
            [
                BasicTransformerBlock(hid_size , num_heads , hid_size // num_heads , activation_fn='gelu' , attention_bias=True )
                for _ in range(num_layers )
            ] )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
for block in self.blocks:
__lowerCAmelCase : List[Any] = block(_SCREAMING_SNAKE_CASE )
return hidden_states | 86 |
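# Note on `uncond_vector` above: it is a learned "no example image" embedding.
# When the encoder is called with return_uncond_vector=True, it is returned
# alongside the conditioned latents and serves as the unconditional branch for
# classifier-free guidance in the Paint-by-Example pipeline.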
"""simple docstring"""
def sum_digits(num ):
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n = 100 ):
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2 , max_n + 1 ):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
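# Context for the recurrence above (Project Euler 65): the continued fraction
# of e is [2; 1, 2, 1, 1, 4, 1, 1, 6, ...], i.e. every third partial quotient
# is 2k, which is what `2 * i // 3 if i % 3 == 0 else 1` encodes. As a check,
# solution(10) reaches the 10th convergent numerator 1457, whose digit sum is
# 1 + 4 + 5 + 7 = 17.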
"""simple docstring"""
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = Dict[str, Any]
lowerCamelCase__ = List[Prediction]
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if self.framework == "tf":
raise ValueError(f"The {self.__class__} is only available in PyTorch." )
requires_backends(self , 'vision' )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def __lowerCamelCase ( self , **_SCREAMING_SNAKE_CASE ):
        postprocess_kwargs = {}
        if "threshold" in kwargs:
            postprocess_kwargs['threshold'] = kwargs['threshold']
return {}, {}, postprocess_kwargs
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        image = load_image(_SCREAMING_SNAKE_CASE )
        target_size = torch.IntTensor([[image.height, image.width]] )
        inputs = self.image_processor(images=[image] , return_tensors='pt' )
        if self.tokenizer is not None:
            inputs = self.tokenizer(text=inputs['words'] , boxes=inputs['boxes'] , return_tensors='pt' )
        inputs['target_size'] = target_size
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        target_size = model_inputs.pop('target_size' )
        outputs = self.model(**model_inputs )
        model_outputs = outputs.__class__({'target_size': target_size, **outputs} )
        if self.tokenizer is not None:
            model_outputs['bbox'] = model_inputs['bbox']
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0.9 ):
        target_size = model_outputs['target_size']
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
            height , width = target_size[0].tolist()
            def unnormalize(bbox ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 10_00),
(height * bbox[1] / 10_00),
(width * bbox[2] / 10_00),
(height * bbox[3] / 10_00),
] ) )
            scores , classes = model_outputs['logits'].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
            labels = [self.model.config.id2label[prediction] for prediction in classes.tolist()]
            boxes = [unnormalize(bbox ) for bbox in model_outputs['bbox'].squeeze(0 )]
            keys = ['score', 'label', 'box']
            annotation = [dict(zip(keys , vals ) ) for vals in zip(scores.tolist() , labels , boxes ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
            raw_annotations = self.image_processor.post_process_object_detection(model_outputs , threshold , target_size )
            raw_annotation = raw_annotations[0]
            scores = raw_annotation['scores']
            labels = raw_annotation['labels']
            boxes = raw_annotation['boxes']
            raw_annotation['scores'] = scores.tolist()
            raw_annotation['labels'] = [self.model.config.id2label[label.item()] for label in labels]
            raw_annotation['boxes'] = [self._get_bounding_box(box ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
            keys = ['score', 'label', 'box']
            annotation = [
                dict(zip(keys , vals ) )
for vals in zip(raw_annotation['scores'] , raw_annotation['labels'] , raw_annotation['boxes'] )
]
return annotation
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if self.framework != "pt":
raise ValueError('The ObjectDetectionPipeline is only available in PyTorch.' )
        xmin , ymin , xmax , ymax = box.int().tolist()
__lowerCAmelCase : str = {
'xmin': xmin,
'ymin': ymin,
'xmax': xmax,
'ymax': ymax,
}
return bbox | 86 |
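# Standalone sketch of the 0-1000 -> pixel box conversion used in `unnormalize`
# above (hypothetical helper mirroring LayoutLM's normalized coordinate space):
def unnormalize_box(bbox, width, height):
    xmin, ymin, xmax, ymax = bbox
    return {
        "xmin": int(width * xmin / 1000),
        "ymin": int(height * ymin / 1000),
        "xmax": int(width * xmax / 1000),
        "ymax": int(height * ymax / 1000),
    }

# unnormalize_box([100, 200, 300, 400], width=800, height=600)
# -> {"xmin": 80, "ymin": 120, "xmax": 240, "ymax": 240}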
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
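# Hedged usage sketch: the xpath-specific fields above size MarkupLM's extra
# embeddings for HTML structure (the values shown are the defaults from the
# signature above):
# from transformers import MarkupLMConfig
# config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
# (config.tag_pad_id, config.subs_pad_id)  # -> (216, 1001), padding ids for xpath units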
"""simple docstring"""
from typing import Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
        self.check_model_type(MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
        preprocess_params , postprocess_params = {}, {}
        if padding is not None:
            preprocess_params['padding'] = padding
        if truncation is not None:
            preprocess_params['truncation'] = truncation
        if top_k is not None:
            postprocess_params['top_k'] = top_k
return preprocess_params, {}, postprocess_params
def __call__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None , **_SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (Image.Image, str) ) and isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
            inputs = {'image': image, 'question': question}
        else:
            inputs = image
        results = super().__call__(inputs , **_SCREAMING_SNAKE_CASE )
        return results
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False ):
        image = load_image(inputs['image'] )
        model_inputs = self.tokenizer(
            inputs['question'] , return_tensors=self.framework , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
        image_features = self.image_processor(images=image , return_tensors=self.framework )
        model_inputs.update(image_features )
return model_inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
        model_outputs = self.model(**_SCREAMING_SNAKE_CASE )
return model_outputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=5 ):
if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
if self.framework == "pt":
            probs = model_outputs.logits.sigmoid()[0]
            scores , ids = probs.topk(top_k )
else:
raise ValueError(f"Unsupported framework: {self.framework}" )
__lowerCAmelCase : int = scores.tolist()
__lowerCAmelCase : Tuple = ids.tolist()
return [{"score": score, "answer": self.model.config.idalabel[_id]} for score, _id in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )] | 86 |
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
for attribute in key.split('.' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.feature_extractor
for name, value in fairseq_dict.items():
        is_used = False
if "conv_layers" in name:
load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == 'group' , )
            is_used = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
                    is_used = True
if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('.' )[-2]
                        mapped_key = mapped_key.replace('*' , layer_index )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('conv_layers.' )[-1]
    items = name.split('.' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def convert_wavlm_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None ):
    # load the pre-trained checkpoints
    checkpoint = torch.load(checkpoint_path )
    cfg = WavLMConfigOrig(checkpoint['cfg'] )
    model = WavLMOrig(cfg )
    model.load_state_dict(checkpoint['model'] )
    model.eval()
    if config_path is not None:
        config = WavLMConfig.from_pretrained(config_path )
    else:
        config = WavLMConfig()
    hf_wavlm = WavLMModel(config )
    recursively_load_weights(model , hf_wavlm )
    hf_wavlm.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
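# Example invocation (a sketch; the script file name and paths are placeholders):
# python convert_wavlm_checkpoint.py \
#     --checkpoint_path /path/to/WavLM-Base.pt \
#     --pytorch_dump_folder_path ./wavlm-base-converted
# Passing --config_path <config.json> overrides the default WavLMConfig.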
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
class A__ ( _lowerCamelCase):
A_ : Optional[int] = 'poolformer'
def __init__( self , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4.0 , _SCREAMING_SNAKE_CASE=[2, 2, 6, 2] , _SCREAMING_SNAKE_CASE=[64, 1_28, 3_20, 5_12] , _SCREAMING_SNAKE_CASE=[7, 3, 3, 3] , _SCREAMING_SNAKE_CASE=[4, 2, 2, 2] , _SCREAMING_SNAKE_CASE=[2, 1, 1, 1] , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=0.02 , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = num_channels
__lowerCAmelCase : str = patch_size
__lowerCAmelCase : Optional[Any] = stride
__lowerCAmelCase : Optional[int] = padding
__lowerCAmelCase : List[Any] = pool_size
__lowerCAmelCase : int = hidden_sizes
__lowerCAmelCase : str = mlp_ratio
__lowerCAmelCase : Optional[int] = depths
__lowerCAmelCase : str = patch_sizes
__lowerCAmelCase : str = strides
__lowerCAmelCase : Optional[int] = num_encoder_blocks
__lowerCAmelCase : Any = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Dict = use_layer_scale
__lowerCAmelCase : Union[str, Any] = layer_scale_init_value
__lowerCAmelCase : Dict = initializer_range
super().__init__(**_SCREAMING_SNAKE_CASE )
class A__ ( _lowerCamelCase):
A_ : List[str] = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
return 2E-3 | 86 |
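# Note on the OnnxConfig above: marking batch/num_channels/height/width as
# dynamic axes lets the exported ONNX graph accept variable input shapes, and
# the 2e-3 value is the absolute tolerance (`atol_for_validation`) used when
# comparing the exported graph's outputs against the PyTorch model.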
"""simple docstring"""
import os
import pytest
from attr import dataclass
lowerCamelCase__ = """us-east-1""" # defaults region
@dataclass
class A__ :
A_ : str
A_ : Union[str, Any] = 'arn:aws:iam::558105141721:role/sagemaker_execution_role'
A_ : Optional[int] = {
'task_name': 'mnli',
'per_device_train_batch_size': 1_6,
'per_device_eval_batch_size': 1_6,
'do_train': True,
'do_eval': True,
'do_predict': True,
'output_dir': '/opt/ml/model',
'overwrite_output_dir': True,
'max_steps': 5_0_0,
'save_steps': 5_5_0_0,
}
A_ : List[Any] = {**hyperparameters, 'max_steps': 1_0_0_0}
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def __lowerCamelCase ( self ):
return f"{self.framework}-transfromers-test"
@property
def __lowerCamelCase ( self ):
return f"./tests/sagemaker/scripts/{self.framework}"
@property
def __lowerCamelCase ( self ):
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope='class' )
def __lowerCAmelCase (_UpperCamelCase ):
    request.cls.env = SageMakerTestEnvironment(framework=request.cls.framework ) | 86 | 1 |
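# Quick standalone check that the PyTorch metric regexes above capture values
# from typical Trainer log lines (the log lines are illustrative):
import re

assert re.search(r"train_runtime.*=\D*(.*?)$", "train_runtime = 12.34").group(1) == "12.34"
assert re.search(r"eval_accuracy.*=\D*(.*?)$", "eval_accuracy = 0.87").group(1) == "0.87"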
"""simple docstring"""
class DisjointSet:
    def __init__( self , set_counts ):
        self.set_counts = set_counts
        self.max_set = max(set_counts )
        num_sets = len(set_counts )
        self.ranks = [1] * num_sets
        self.parents = list(range(num_sets ) )
    def merge( self , src , dst ):
        src_parent = self.get_parent(src )
        dst_parent = self.get_parent(dst )
        if src_parent == dst_parent:
            return False
        if self.ranks[dst_parent] >= self.ranks[src_parent]:
            self.set_counts[dst_parent] += self.set_counts[src_parent]
            self.set_counts[src_parent] = 0
            self.parents[src_parent] = dst_parent
            if self.ranks[dst_parent] == self.ranks[src_parent]:
                self.ranks[dst_parent] += 1
            joined_set_size = self.set_counts[dst_parent]
        else:
            self.set_counts[src_parent] += self.set_counts[dst_parent]
            self.set_counts[dst_parent] = 0
            self.parents[dst_parent] = src_parent
            joined_set_size = self.set_counts[src_parent]
        self.max_set = max(self.max_set , joined_set_size )
        return True
    def get_parent( self , disj_set ):
        if self.parents[disj_set] == disj_set:
            return disj_set
        self.parents[disj_set] = self.get_parent(self.parents[disj_set] )
        return self.parents[disj_set] | 86 |
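# Usage sketch for the union-find above (merge unions by rank and tracks the
# largest set size via element counts):
# ds = DisjointSet([1, 1, 1])  # three singleton sets, one element each
# ds.merge(1, 2)               # -> True, sets of 1 and 2 joined (size 2)
# ds.merge(0, 2)               # -> True, everything joined (size 3)
# ds.merge(0, 1)               # -> False, already in the same set
# ds.max_set                   # -> 3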
"""simple docstring"""
from __future__ import annotations
Path = list[tuple[int, int]]
grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
delta = ([-1, 0], [0, -1], [1, 0], [0, 1])  # up, left, down, right
class Node:
    def __init__( self , pos_x , pos_y , goal_x , goal_y , g_cost , parent , ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.g_cost = g_cost
        self.parent = parent
        self.f_cost = self.calculate_heuristic()
    def calculate_heuristic( self ):
        dx = abs(self.pos_x - self.goal_x )
        dy = abs(self.pos_y - self.goal_y )
        return dx + dy
    def __lt__( self , other ):
        return self.f_cost < other.f_cost
class GreedyBestFirst:
    def __init__( self , start , goal ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_99_99 , None )
        self.open_nodes = [self.start]
        self.closed_nodes: list[Node] = []
        self.reached = False
    def search( self ):
        while self.open_nodes:
            # Open Nodes are sorted using __lt__
            self.open_nodes.sort()
            current_node = self.open_nodes.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            self.closed_nodes.append(current_node )
            successors = self.get_successors(current_node )
            for child_node in successors:
                if child_node in self.closed_nodes:
                    continue
                if child_node not in self.open_nodes:
                    self.open_nodes.append(child_node )
                else:
                    # retrieve the best current path
                    better_node = self.open_nodes.pop(self.open_nodes.index(child_node ) )
                    if child_node.g_cost < better_node.g_cost:
                        self.open_nodes.append(child_node )
                    else:
                        self.open_nodes.append(better_node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent ):
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(
                    pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , parent , ) )
        return successors
    def retrace_path( self , node ):
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
if __name__ == "__main__":
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    print("""------""")
    greedy_bf = GreedyBestFirst(init, goal)
    path = greedy_bf.search()
    if path:
        for pos_x, pos_y in path:
            grid[pos_x][pos_y] = 2
for elem in grid:
print(elem) | 86 | 1 |
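The sample above is a greedy best-first search over a 2D grid, but the dataset's identifier obfuscation (`__lowerCAmelCase`, `_SCREAMING_SNAKE_CASE`) hides most attribute assignments. For reference, a minimal runnable sketch of the same idea follows: the open list is sorted by the Manhattan heuristic alone (greedy, not A*). Every name in it is illustrative, not an original identifier.

from __future__ import annotations

GRID = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0s are free cells, 1s are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]
DELTA = ((-1, 0), (0, -1), (1, 0), (0, 1))  # up, left, down, right


def heuristic(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    # Manhattan distance, as in calculate_heuristic() above.
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])


def greedy_best_first(grid, start, goal):
    frontier = [(heuristic(start, goal), start, [start])]
    visited = {start}
    while frontier:
        frontier.sort()  # cheapest heuristic first, mirroring open_nodes.sort()
        _, (y, x), path = frontier.pop(0)
        if (y, x) == goal:
            return path
        for dy, dx in DELTA:
            ny, nx = y + dy, x + dx
            in_bounds = 0 <= ny < len(grid) and 0 <= nx < len(grid[0])
            if in_bounds and grid[ny][nx] == 0 and (ny, nx) not in visited:
                visited.add((ny, nx))
                frontier.append((heuristic((ny, nx), goal), (ny, nx), path + [(ny, nx)]))
    return None


print(greedy_best_first(GRID, (0, 0), (len(GRID) - 1, len(GRID[0]) - 1)))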
"""simple docstring"""
from __future__ import annotations
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = []
__lowerCAmelCase , __lowerCAmelCase : Tuple = input_list[low:mid], input_list[mid : high + 1]
while left and right:
result.append((left if left[0] <= right[0] else right).pop(0 ) )
__lowerCAmelCase : Union[str, Any] = result + left + right
return input_list
def __lowerCAmelCase (_UpperCamelCase ):
if len(_UpperCamelCase ) <= 1:
return input_list
__lowerCAmelCase : List[str] = list(_UpperCamelCase )
# iteration for two-way merging
__lowerCAmelCase : Optional[int] = 2
while p <= len(_UpperCamelCase ):
# compute the low, high and middle indices for merging each sorted run
for i in range(0 , len(_UpperCamelCase ) , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = i
__lowerCAmelCase : Optional[int] = i + p - 1
__lowerCAmelCase : Tuple = (low + high + 1) // 2
__lowerCAmelCase : Union[str, Any] = merge(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
# final merge of last two parts
if p * 2 >= len(_UpperCamelCase ):
__lowerCAmelCase : Dict = i
__lowerCAmelCase : Tuple = merge(_UpperCamelCase , 0 , _UpperCamelCase , len(_UpperCamelCase ) - 1 )
break
p *= 2
return input_list
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by a comma:\n""").strip()
if user_input == "":
lowerCamelCase__ = []
else:
lowerCamelCase__ = [int(item.strip()) for item in user_input.split(""",""")]
print(iter_merge_sort(unsorted)) | 86 |
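For comparison, here is a compact de-obfuscated sketch of the same bottom-up (iterative) merge sort: it merges sorted runs of width 1, 2, 4, ... until one run covers the whole list. It is an equivalent reconstruction, not the original implementation.

def iter_merge_sort(items: list[int]) -> list[int]:
    items = list(items)  # sort a copy
    n = len(items)
    width = 1  # current run width: 1, 2, 4, ...
    while width < n:
        for low in range(0, n, 2 * width):
            mid = min(low + width, n)
            high = min(low + 2 * width, n)
            left, right = items[low:mid], items[mid:high]
            merged = []
            while left and right:  # two-way merge of two sorted runs
                merged.append((left if left[0] <= right[0] else right).pop(0))
            items[low:high] = merged + left + right
        width *= 2
    return items


assert iter_merge_sort([5, 3, 8, 1, 9, 2]) == [1, 2, 3, 5, 8, 9]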
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float(moles / volume ) * nfactor )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (volume) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((moles * 0.0821 * temperature) / (pressure) ) )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
return round(float((pressure * volume) / (0.0821 * moles) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
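The last three helpers above are rearrangements of the ideal gas law PV = nRT with R ≈ 0.0821 L·atm/(mol·K); the first rounds a moles-per-volume product whose purpose is not recoverable from the obfuscated signature. A sketch with explicit names, which are mine rather than the sample's:

R = 0.0821  # ideal gas constant in L*atm/(mol*K)


def pressure_of_gas(moles: float, kelvin: float, volume: float) -> float:
    return moles * R * kelvin / volume  # P = nRT / V


def volume_of_gas(moles: float, kelvin: float, pressure: float) -> float:
    return moles * R * kelvin / pressure  # V = nRT / P


def temperature_of_gas(pressure: float, volume: float, moles: float) -> float:
    return pressure * volume / (R * moles)  # T = PV / nR


# Sanity check: one mole at 273 K in 22.4 L is roughly 1 atm.
print(round(pressure_of_gas(1, 273, 22.4), 2))  # 1.0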
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE="last" , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : int = parent
__lowerCAmelCase : List[Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : int = is_training
__lowerCAmelCase : Optional[int] = use_input_lengths
__lowerCAmelCase : str = use_token_type_ids
__lowerCAmelCase : int = use_labels
__lowerCAmelCase : Union[str, Any] = gelu_activation
__lowerCAmelCase : List[str] = sinusoidal_embeddings
__lowerCAmelCase : Tuple = causal
__lowerCAmelCase : Union[str, Any] = asm
__lowerCAmelCase : str = n_langs
__lowerCAmelCase : Dict = vocab_size
__lowerCAmelCase : List[str] = n_special
__lowerCAmelCase : Union[str, Any] = hidden_size
__lowerCAmelCase : Optional[Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : str = hidden_dropout_prob
__lowerCAmelCase : Tuple = attention_probs_dropout_prob
__lowerCAmelCase : str = max_position_embeddings
__lowerCAmelCase : List[str] = type_vocab_size
__lowerCAmelCase : str = type_sequence_label_size
__lowerCAmelCase : int = initializer_range
__lowerCAmelCase : List[Any] = num_labels
__lowerCAmelCase : Optional[Any] = num_choices
__lowerCAmelCase : Any = summary_type
__lowerCAmelCase : str = use_proj
__lowerCAmelCase : Optional[int] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : int = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : int = None
if self.use_input_lengths:
__lowerCAmelCase : Tuple = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : int = None
__lowerCAmelCase : int = None
if self.use_labels:
__lowerCAmelCase : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : int = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , 2 ).float()
__lowerCAmelCase : Any = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Any = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def __lowerCamelCase ( self ):
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : int = FlaubertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , lengths=_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , langs=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : str = FlaubertWithLMHeadModel(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Tuple = FlaubertForQuestionAnsweringSimple(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[Any] = FlaubertForQuestionAnswering(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , p_mask=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , cls_index=_SCREAMING_SNAKE_CASE , is_impossible=_SCREAMING_SNAKE_CASE , )
((__lowerCAmelCase) , ) : Optional[Any] = result_with_labels.to_tuple()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE )
((__lowerCAmelCase) , ) : str = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : List[Any] = FlaubertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : List[str] = FlaubertForTokenClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Union[str, Any] = self.num_choices
__lowerCAmelCase : Optional[int] = FlaubertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : Union[str, Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : str = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) , (
__lowerCAmelCase
) ,
) : List[str] = config_and_inputs
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'token_type_ids': token_type_ids,
'lengths': input_lengths,
'attention_mask': input_mask,
}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[Any] = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
A_ : int = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : Union[str, Any] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
__lowerCAmelCase : Dict = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = FlaubertModelTester(self )
__lowerCAmelCase : List[Any] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , emb_dim=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Any = FlaubertModel.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
@slow
@require_torch_gpu
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
__lowerCAmelCase : str = True
__lowerCAmelCase : Any = model_class(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = torch.jit.trace(
_SCREAMING_SNAKE_CASE , (inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(_SCREAMING_SNAKE_CASE , os.path.join(_SCREAMING_SNAKE_CASE , 'traced_model.pt' ) )
__lowerCAmelCase : Union[str, Any] = torch.jit.load(os.path.join(_SCREAMING_SNAKE_CASE , 'traced_model.pt' ) , map_location=_SCREAMING_SNAKE_CASE )
loaded(inputs_dict['input_ids'].to(_SCREAMING_SNAKE_CASE ) , inputs_dict['attention_mask'].to(_SCREAMING_SNAKE_CASE ) )
@require_torch
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
__lowerCAmelCase : Optional[int] = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : List[Any] = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.tensor(
[[[-2.6251, -1.4298, -0.0227], [-2.8510, -1.6387, 0.2258], [-2.8114, -1.1832, -0.3066]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) ) | 86 |
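Outside the test harness, the integration check at the end corresponds to a few lines of ordinary usage. A hedged sketch follows; the checkpoint name comes from the test above, while the French example sentence is mine:

import torch
from transformers import FlaubertModel, FlaubertTokenizer

tokenizer = FlaubertTokenizer.from_pretrained("flaubert/flaubert_base_cased")
model = FlaubertModel.from_pretrained("flaubert/flaubert_base_cased")

inputs = tokenizer("Le chat dort sur le canapé.", return_tensors="pt")
with torch.no_grad():
    hidden = model(**inputs).last_hidden_state  # shape (1, seq_len, 768)
print(hidden.shape)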
"""simple docstring"""
import enum
import warnings
from .. import MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_CAUSAL_LM_MAPPING
from ..utils import add_end_docstrings, is_tf_available
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
class A__ ( enum.Enum):
A_ : List[Any] = 0
A_ : Dict = 1
A_ : Union[str, Any] = 2
@add_end_docstrings(_lowerCamelCase)
class A__ ( _lowerCamelCase):
A_ : str = '\n In 1991, the remains of Russian Tsar Nicholas II and his family (except for Alexei and Maria) are discovered. The\n voice of Nicholas\'s young son, Tsarevich Alexei Nikolaevich, narrates the remainder of the story. 1883 Western\n Siberia, a young Grigori Rasputin is asked by his father and a group of men to perform magic. Rasputin has a vision\n and denounces one of the men as a horse thief. Although his father initially slaps him for making such an\n accusation, Rasputin watches as the man is chased outside and beaten. Twenty years later, Rasputin sees a vision of\n the Virgin Mary, prompting him to become a priest. Rasputin quickly becomes famous, with people, even a bishop,\n begging for his blessing. <eod> </s> <eos>\n '
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
self.check_model_type(
TF_MODEL_FOR_CAUSAL_LM_MAPPING if self.framework == 'tf' else MODEL_FOR_CAUSAL_LM_MAPPING )
if "prefix" not in self._preprocess_params:
# This is very specific. The logic is quite complex and needs to be done
# as a "default".
# It also defines both some preprocess_kwargs and generate_kwargs
# which is why we cannot put them in their respective methods.
__lowerCAmelCase : Any = None
if self.model.config.prefix is not None:
__lowerCAmelCase : str = self.model.config.prefix
if prefix is None and self.model.__class__.__name__ in [
"XLNetLMHeadModel",
"TransfoXLLMHeadModel",
"TFXLNetLMHeadModel",
"TFTransfoXLLMHeadModel",
]:
# For XLNet and TransformerXL we add an article to the prompt to give more state to the model.
__lowerCAmelCase : Tuple = self.XL_PREFIX
if prefix is not None:
# Recalculate some generate_kwargs linked to prefix.
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self._sanitize_parameters(prefix=_SCREAMING_SNAKE_CASE , **self._forward_params )
__lowerCAmelCase : List[str] = {**self._preprocess_params, **preprocess_params}
__lowerCAmelCase : List[str] = {**self._forward_params, **forward_params}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
__lowerCAmelCase : Optional[int] = {}
if prefix is not None:
__lowerCAmelCase : Union[str, Any] = prefix
if prefix:
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : List[Any] = prefix_inputs['input_ids'].shape[-1]
if handle_long_generation is not None:
if handle_long_generation not in {"hole"}:
raise ValueError(
f"{handle_long_generation} is not a valid value for `handle_long_generation` parameter expected"
' [None, \'hole\']' )
__lowerCAmelCase : int = handle_long_generation
preprocess_params.update(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = generate_kwargs
__lowerCAmelCase : List[Any] = {}
if return_full_text is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_full_text`' )
if return_tensors is not None:
raise ValueError('`return_full_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : Optional[Any] = ReturnType.FULL_TEXT if return_full_text else ReturnType.NEW_TEXT
if return_tensors is not None and return_type is None:
if return_text is not None:
raise ValueError('`return_text` is mutually exclusive with `return_tensors`' )
__lowerCAmelCase : List[Any] = ReturnType.TENSORS
if return_type is not None:
__lowerCAmelCase : Optional[Any] = return_type
if clean_up_tokenization_spaces is not None:
__lowerCAmelCase : Tuple = clean_up_tokenization_spaces
if stop_sequence is not None:
__lowerCAmelCase : Union[str, Any] = self.tokenizer.encode(_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 1:
warnings.warn(
'Stopping on a multiple token sequence is not yet supported on transformers. The first token of'
' the stop sequence will be used as the stop sequence string in the interim.' )
__lowerCAmelCase : Optional[Any] = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# Parse arguments
if self.model.__class__.__name__ in ["TransfoXLLMHeadModel"]:
kwargs.update({'add_space_before_punct_symbol': True} )
return super()._parse_and_tokenize(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __call__( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return super().__call__(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE="" , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = self.tokenizer(
prefix + prompt_text , padding=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors=self.framework )
__lowerCAmelCase : Optional[Any] = prompt_text
if handle_long_generation == "hole":
__lowerCAmelCase : str = inputs['input_ids'].shape[-1]
if "max_new_tokens" in generate_kwargs:
__lowerCAmelCase : Union[str, Any] = generate_kwargs['max_new_tokens']
else:
__lowerCAmelCase : Any = generate_kwargs.get('max_length' , self.model.config.max_length ) - cur_len
if new_tokens < 0:
raise ValueError('We cannot infer how many new tokens are expected' )
if cur_len + new_tokens > self.tokenizer.model_max_length:
__lowerCAmelCase : Any = self.tokenizer.model_max_length - new_tokens
if keep_length <= 0:
raise ValueError(
'We cannot use `hole` to handle this generation: the number of desired tokens exceeds the'
" model's max length" )
__lowerCAmelCase : int = inputs['input_ids'][:, -keep_length:]
if "attention_mask" in inputs:
__lowerCAmelCase : List[Any] = inputs['attention_mask'][:, -keep_length:]
return inputs
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = model_inputs['input_ids']
__lowerCAmelCase : List[Any] = model_inputs.get('attention_mask' , _SCREAMING_SNAKE_CASE )
# Allow empty prompts
if input_ids.shape[1] == 0:
__lowerCAmelCase : Dict = None
__lowerCAmelCase : str = None
__lowerCAmelCase : Tuple = 1
else:
__lowerCAmelCase : Any = input_ids.shape[0]
__lowerCAmelCase : Union[str, Any] = model_inputs.pop('prompt_text' )
# If there is a prefix, we may need to adjust the generation length. Do so without permanently modifying
# generate_kwargs, as some of the parameterization may come from the initialization of the pipeline.
__lowerCAmelCase : Optional[int] = generate_kwargs.pop('prefix_length' , 0 )
if prefix_length > 0:
__lowerCAmelCase : Any = 'max_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].max_new_tokens is not None
)
if not has_max_new_tokens:
__lowerCAmelCase : List[str] = generate_kwargs.get('max_length' ) or self.model.config.max_length
generate_kwargs["max_length"] += prefix_length
__lowerCAmelCase : Dict = 'min_new_tokens' in generate_kwargs or (
'generation_config' in generate_kwargs
and generate_kwargs['generation_config'].min_new_tokens is not None
)
if not has_min_new_tokens and "min_length" in generate_kwargs:
generate_kwargs["min_length"] += prefix_length
# BS x SL
__lowerCAmelCase : Optional[int] = self.model.generate(input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = generated_sequence.shape[0]
if self.framework == "pt":
__lowerCAmelCase : Dict = generated_sequence.reshape(_SCREAMING_SNAKE_CASE , out_b // in_b , *generated_sequence.shape[1:] )
elif self.framework == "tf":
__lowerCAmelCase : Any = tf.reshape(_SCREAMING_SNAKE_CASE , (in_b, out_b // in_b, *generated_sequence.shape[1:]) )
return {"generated_sequence": generated_sequence, "input_ids": input_ids, "prompt_text": prompt_text}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=ReturnType.FULL_TEXT , _SCREAMING_SNAKE_CASE=True ):
__lowerCAmelCase : Any = model_outputs['generated_sequence'][0]
__lowerCAmelCase : Tuple = model_outputs['input_ids']
__lowerCAmelCase : Any = model_outputs['prompt_text']
__lowerCAmelCase : int = generated_sequence.numpy().tolist()
__lowerCAmelCase : Union[str, Any] = []
for sequence in generated_sequence:
if return_type == ReturnType.TENSORS:
__lowerCAmelCase : int = {'generated_token_ids': sequence}
elif return_type in {ReturnType.NEW_TEXT, ReturnType.FULL_TEXT}:
# Decode text
__lowerCAmelCase : Any = self.tokenizer.decode(
_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , )
# Strip the prompt (including the padding prefix added for XLNet or Transfo-XL) from the decoded text
if input_ids is None:
__lowerCAmelCase : Optional[Any] = 0
else:
__lowerCAmelCase : Any = len(
self.tokenizer.decode(
input_ids[0] , skip_special_tokens=_SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=_SCREAMING_SNAKE_CASE , ) )
if return_type == ReturnType.FULL_TEXT:
__lowerCAmelCase : Union[str, Any] = prompt_text + text[prompt_length:]
else:
__lowerCAmelCase : int = text[prompt_length:]
__lowerCAmelCase : Dict = {'generated_text': all_text}
records.append(_SCREAMING_SNAKE_CASE )
return records | 86 | 1 |
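The class above is transformers' text-generation pipeline: preprocess builds the prompt tensors, the forward step calls `generate`, and postprocess decodes and optionally strips the prompt. End users normally reach it through the `pipeline` factory; a small usage sketch, with `gpt2` as a stand-in model name:

from transformers import pipeline

generator = pipeline("text-generation", model="gpt2")
result = generator(
    "In 1991, the remains of",
    max_new_tokens=20,       # length budget beyond the prompt
    return_full_text=False,  # ReturnType.NEW_TEXT: drop the prompt from the output
)
print(result[0]["generated_text"])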
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def __lowerCAmelCase ():
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = 9, 14 # noqa: F841
__lowerCAmelCase : Optional[int] = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
__lowerCAmelCase : Union[str, Any] = defaultdict(list )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
__lowerCAmelCase : Any = mst(adjancency )
__lowerCAmelCase : List[str] = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
__lowerCAmelCase : Optional[Any] = tuple(answer[:2] )
__lowerCAmelCase : Union[str, Any] = tuple(edge[::-1] )
assert edge in result or reverse in result | 86 |
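The imported `prisms_algorithm` itself is not shown in this sample. For reference, here is a heap-based sketch of Prim's algorithm, which grows the tree from one vertex and always takes the cheapest edge leaving it. The function name and the reduced edge list are mine; the adjacency format (`[neighbor, cost]` pairs) matches the test above.

import heapq
from collections import defaultdict


def prims_mst(adjacency, start=0):
    mst, seen = [], {start}
    heap = [(cost, start, to) for to, cost in adjacency[start]]
    heapq.heapify(heap)
    while heap:
        cost, frm, to = heapq.heappop(heap)
        if to in seen:
            continue  # both endpoints already in the tree
        seen.add(to)
        mst.append((frm, to, cost))
        for nxt, c in adjacency[to]:
            if nxt not in seen:
                heapq.heappush(heap, (c, to, nxt))
    return mst


adjacency = defaultdict(list)
for a, b, cost in [[0, 1, 4], [0, 7, 8], [1, 7, 11], [7, 6, 1], [6, 5, 2]]:
    adjacency[a].append([b, cost])
    adjacency[b].append([a, cost])
print(prims_mst(adjacency))  # [(0, 1, 4), (0, 7, 8), (7, 6, 1), (6, 5, 2)]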
"""simple docstring"""
from __future__ import annotations
import bisect
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : Tuple = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Optional[int] = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__lowerCAmelCase : int = mid + 1
else:
__lowerCAmelCase : List[str] = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
if hi < 0:
__lowerCAmelCase : List[Any] = len(_UpperCamelCase )
while lo < hi:
__lowerCAmelCase : Union[str, Any] = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__lowerCAmelCase : Dict = mid + 1
else:
__lowerCAmelCase : str = mid
return lo
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_left(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase = 0 , _UpperCamelCase = -1 ):
sorted_collection.insert(bisect_right(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ) , _UpperCamelCase )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = 0
__lowerCAmelCase : int = len(_UpperCamelCase ) - 1
while left <= right:
__lowerCAmelCase : List[Any] = left + (right - left) // 2
__lowerCAmelCase : Union[str, Any] = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__lowerCAmelCase : Tuple = midpoint - 1
else:
__lowerCAmelCase : str = midpoint + 1
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Union[str, Any] = bisect.bisect_left(_UpperCamelCase , _UpperCamelCase )
if index != len(_UpperCamelCase ) and sorted_collection[index] == item:
return index
return None
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
if right < left:
return None
__lowerCAmelCase : List[str] = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , midpoint - 1 )
else:
return binary_search_by_recursion(_UpperCamelCase , _UpperCamelCase , midpoint + 1 , _UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = input("""Enter numbers separated by comma:\n""").strip()
lowerCamelCase__ = sorted(int(item) for item in user_input.split(""","""))
lowerCamelCase__ = int(input("""Enter a single number to be found in the list:\n"""))
lowerCamelCase__ = binary_search(collection, target)
if result is None:
print(f'{target} was not found in {collection}.')
else:
print(f'{target} was found at position {result} in {collection}.') | 86 | 1 |
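The handwritten `bisect_left`/`bisect_right` loops above compute the same insertion points as the standard-library `bisect` module, which the `*_by_bisect` variants already use. A condensed sketch of that equivalence plus the core iterative search:

import bisect

data = [0, 5, 7, 7, 10, 15]

# Insertion points: bisect_left lands before equal items, bisect_right after.
assert bisect.bisect_left(data, 7) == 2
assert bisect.bisect_right(data, 7) == 4


def binary_search(sorted_collection, item):
    left, right = 0, len(sorted_collection) - 1
    while left <= right:
        mid = left + (right - left) // 2  # avoids overflow in fixed-width languages
        if sorted_collection[mid] == item:
            return mid
        if item < sorted_collection[mid]:
            right = mid - 1
        else:
            left = mid + 1
    return None


assert binary_search(data, 10) == 4
assert binary_search(data, 6) is None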
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Optional[Any] = len(_UpperCamelCase )
__lowerCAmelCase : int = []
for i in range(len(_UpperCamelCase ) - pat_len + 1 ):
__lowerCAmelCase : Optional[Any] = True
for j in range(_UpperCamelCase ):
if s[i + j] != pattern[j]:
__lowerCAmelCase : str = False
break
if match_found:
position.append(_UpperCamelCase )
return position
if __name__ == "__main__":
assert naive_pattern_search("""ABCDEFG""", """DE""") == [3]
print(naive_pattern_search("""ABAAABCDBBABCDDEBCABC""", """ABC""")) | 86 |
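The same O(len(s) * len(pattern)) scan can be written with a slice comparison, which reads more clearly and is usually faster thanks to C-level slicing. A brief sketch, not the sample's implementation:

def naive_pattern_search(text: str, pattern: str) -> list[int]:
    m = len(pattern)
    # Compare the pattern against every window of the text.
    return [i for i in range(len(text) - m + 1) if text[i : i + m] == pattern]


assert naive_pattern_search("ABCDEFG", "DE") == [3]
assert naive_pattern_search("ABAAABCDBBABCDDEBCABC", "ABC") == [4, 10, 18]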
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import is_tf_available, is_torch_available
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
if is_tf_available():
from transformers import (
AutoConfig,
BertConfig,
GPTaConfig,
TaConfig,
TFAutoModel,
TFAutoModelForCausalLM,
TFAutoModelForMaskedLM,
TFAutoModelForPreTraining,
TFAutoModelForQuestionAnswering,
TFAutoModelForSeqaSeqLM,
TFAutoModelForSequenceClassification,
TFAutoModelWithLMHead,
TFBertForMaskedLM,
TFBertForPreTraining,
TFBertForQuestionAnswering,
TFBertForSequenceClassification,
TFBertModel,
TFGPTaLMHeadModel,
TFRobertaForMaskedLM,
TFTaForConditionalGeneration,
)
from transformers.models.bert.modeling_tf_bert import TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.gpta.modeling_tf_gpta import TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST
from transformers.models.ta.modeling_tf_ta import TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST
if is_torch_available():
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForMaskedLM,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelWithLMHead,
BertForMaskedLM,
BertForPreTraining,
BertForQuestionAnswering,
BertForSequenceClassification,
BertModel,
GPTaLMHeadModel,
RobertaForMaskedLM,
TaForConditionalGeneration,
)
@is_pt_tf_cross_test
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Tuple = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModel.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : int = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = AutoModelForPreTraining.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_GPT2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[str] = TFAutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = AutoModelForCausalLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = AutoModelForCausalLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = TFAutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : int = TFAutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForMaskedLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = AutoModelForMaskedLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
for model_name in TF_T5_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__lowerCAmelCase : Optional[int] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Tuple = TFAutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = AutoModelForSeqaSeqLM.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Dict = AutoModelForSeqaSeqLM.from_pretrained(
_SCREAMING_SNAKE_CASE , output_loading_info=_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Dict = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = TFAutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForSequenceClassification.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
# for model_name in TF_BERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
for model_name in ["bert-base-uncased"]:
__lowerCAmelCase : Union[str, Any] = AutoConfig.from_pretrained(_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = TFAutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = AutoModelForQuestionAnswering.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = TFAutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 )
__lowerCAmelCase : Tuple = AutoModelWithLMHead.from_pretrained(_SCREAMING_SNAKE_CASE , from_tf=_SCREAMING_SNAKE_CASE )
self.assertIsInstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
self.assertEqual(model.num_parameters() , 1_44_10 )
self.assertEqual(model.num_parameters(only_trainable=_SCREAMING_SNAKE_CASE ) , 1_44_10 ) | 86 | 1 |
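The pattern these tests exercise is cross-framework checkpoint loading: `from_pt=True` builds a TF model from PyTorch weights and `from_tf=True` does the reverse. A minimal sketch; it assumes the checkpoint publishes both weight files, which `bert-base-uncased` does:

from transformers import AutoModel, TFAutoModel

tf_model = TFAutoModel.from_pretrained("bert-base-uncased", from_pt=True)  # PT weights -> TF model
pt_model = AutoModel.from_pretrained("bert-base-uncased", from_tf=True)    # TF weights -> PT model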
"""simple docstring"""
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Tuple = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __lowerCAmelCase (_UpperCamelCase = 100 ):
__lowerCAmelCase : Optional[int] = 1
__lowerCAmelCase : Optional[Any] = 2
for i in range(2 , max_n + 1 ):
__lowerCAmelCase : Any = pre_numerator
__lowerCAmelCase : Union[str, Any] = 2 * i // 3 if i % 3 == 0 else 1
__lowerCAmelCase : int = cur_numerator
__lowerCAmelCase : Dict = e_cont * pre_numerator + temp
return sum_digits(cur_numerator )
if __name__ == "__main__":
print(f'{solution() = }') | 86 |
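This is Project Euler 65: the loop builds successive numerators of the continued fraction of e = [2; 1, 2, 1, 1, 4, 1, 1, 6, ...] (every third partial quotient is even) and returns the digit sum of the 100th convergent's numerator. An independent cross-check using `fractions.Fraction`; the helper names are mine:

from fractions import Fraction


def e_terms(n: int) -> list[int]:
    # [2; 1, 2, 1, 1, 4, 1, 1, 6, ...]: every third term after the first is 2k.
    return [2] + [2 * (i // 3 + 1) if i % 3 == 1 else 1 for i in range(n - 1)]


def convergent(terms: list[int]) -> Fraction:
    frac = Fraction(terms[-1])
    for term in reversed(terms[:-1]):
        frac = term + 1 / frac
    return frac


assert convergent(e_terms(10)).numerator == 1457  # worked example from the problem
print(sum(int(d) for d in str(convergent(e_terms(100)).numerator)))  # 272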
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
lowerCamelCase__ = logging.get_logger(__name__) # pylint: disable=invalid-name
lowerCamelCase__ = """
Examples:
```py
>>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline
>>> import torch
>>> pipe_prior = KandinskyPriorPipeline.from_pretrained(\"kandinsky-community/Kandinsky-2-1-prior\")
>>> pipe_prior.to(\"cuda\")
>>> prompt = \"red cat, 4k photo\"
>>> out = pipe_prior(prompt)
>>> image_emb = out.image_embeds
>>> negative_image_emb = out.negative_image_embeds
>>> pipe = KandinskyPipeline.from_pretrained(\"kandinsky-community/kandinsky-2-1\")
>>> pipe.to(\"cuda\")
>>> image = pipe(
... prompt,
... image_embeds=image_emb,
... negative_image_embeds=negative_image_emb,
... height=768,
... width=768,
... num_inference_steps=100,
... ).images
>>> image[0].save(\"cat.png\")
```
"""
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=8 ):
__lowerCAmelCase : Dict = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
__lowerCAmelCase : List[str] = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
class A__ ( _lowerCamelCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
self.register_modules(
text_encoder=_SCREAMING_SNAKE_CASE , tokenizer=_SCREAMING_SNAKE_CASE , unet=_SCREAMING_SNAKE_CASE , scheduler=_SCREAMING_SNAKE_CASE , movq=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Dict = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if latents is None:
__lowerCAmelCase : Tuple = randn_tensor(_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , device=_SCREAMING_SNAKE_CASE , dtype=_SCREAMING_SNAKE_CASE )
else:
if latents.shape != shape:
raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}" )
__lowerCAmelCase : Any = latents.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = latents * scheduler.init_noise_sigma
return latents
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE ) if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) else 1
# get prompt text embeddings
__lowerCAmelCase : Dict = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , truncation=_SCREAMING_SNAKE_CASE , max_length=77 , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Tuple = text_inputs.input_ids
__lowerCAmelCase : Union[str, Any] = self.tokenizer(_SCREAMING_SNAKE_CASE , padding='longest' , return_tensors='pt' ).input_ids
if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1] )
logger.warning(
'The following part of your input was truncated because CLIP can only handle sequences up to'
f" {self.tokenizer.model_max_length} tokens: {removed_text}" )
__lowerCAmelCase : Dict = text_input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = text_inputs.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : str = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = prompt_embeds.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Dict = text_encoder_hidden_states.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
__lowerCAmelCase : Optional[int] = text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
if do_classifier_free_guidance:
__lowerCAmelCase : List[str]
if negative_prompt is None:
__lowerCAmelCase : Union[str, Any] = [''] * batch_size
elif type(_SCREAMING_SNAKE_CASE ) is not type(_SCREAMING_SNAKE_CASE ):
raise TypeError(
f"`negative_prompt` should be the same type to `prompt`, but got {type(_SCREAMING_SNAKE_CASE )} !="
f" {type(_SCREAMING_SNAKE_CASE )}." )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = [negative_prompt]
elif batch_size != len(_SCREAMING_SNAKE_CASE ):
raise ValueError(
f"`negative_prompt`: {negative_prompt} has batch size {len(_SCREAMING_SNAKE_CASE )}, but `prompt`:"
f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
' the batch size of `prompt`.' )
else:
__lowerCAmelCase : Optional[int] = negative_prompt
__lowerCAmelCase : Tuple = self.tokenizer(
_SCREAMING_SNAKE_CASE , padding='max_length' , max_length=77 , truncation=_SCREAMING_SNAKE_CASE , return_attention_mask=_SCREAMING_SNAKE_CASE , add_special_tokens=_SCREAMING_SNAKE_CASE , return_tensors='pt' , )
__lowerCAmelCase : Union[str, Any] = uncond_input.input_ids.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = uncond_input.attention_mask.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : Any = self.text_encoder(
input_ids=_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__lowerCAmelCase : List[str] = negative_prompt_embeds.shape[1]
__lowerCAmelCase : Any = negative_prompt_embeds.repeat(1 , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = negative_prompt_embeds.view(batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = uncond_text_encoder_hidden_states.shape[1]
__lowerCAmelCase : List[Any] = uncond_text_encoder_hidden_states.repeat(1 , _SCREAMING_SNAKE_CASE , 1 )
__lowerCAmelCase : Optional[int] = uncond_text_encoder_hidden_states.view(
batch_size * num_images_per_prompt , _SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = uncond_text_mask.repeat_interleave(_SCREAMING_SNAKE_CASE , dim=0 )
# done duplicates
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__lowerCAmelCase : Tuple = torch.cat([negative_prompt_embeds, prompt_embeds] )
__lowerCAmelCase : Tuple = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states] )
__lowerCAmelCase : int = torch.cat([uncond_text_mask, text_mask] )
return prompt_embeds, text_encoder_hidden_states, text_mask
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
__lowerCAmelCase : Union[str, Any] = torch.device(f"cuda:{gpu_id}" )
__lowerCAmelCase : List[Any] = [
self.unet,
self.text_encoder,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE=0 ):
if is_accelerate_available() and is_accelerate_version('>=' , '0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
__lowerCAmelCase : str = torch.device(f"cuda:{gpu_id}" )
if self.device.type != "cpu":
self.to('cpu' , silence_dtype_warnings=_SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
__lowerCAmelCase : Any = None
for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
__lowerCAmelCase , __lowerCAmelCase : Any = cpu_offload_with_hook(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
if self.safety_checker is not None:
__lowerCAmelCase , __lowerCAmelCase : Dict = cpu_offload_with_hook(self.safety_checker , _SCREAMING_SNAKE_CASE , prev_module_hook=_SCREAMING_SNAKE_CASE )
# We'll offload the last model manually.
__lowerCAmelCase : Optional[Any] = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def __lowerCamelCase ( self ):
if not hasattr(self.unet , '_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(_SCREAMING_SNAKE_CASE , '_hf_hook' )
and hasattr(module._hf_hook , 'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(_SCREAMING_SNAKE_CASE )
    def __call__( self , prompt , image_embeds , negative_image_embeds , negative_prompt = None , height = 5_12 , width = 5_12 , num_inference_steps = 1_00 , guidance_scale = 4.0 , num_images_per_prompt = 1 , generator = None , latents = None , output_type = "pil" , return_dict = True , ):
        if isinstance(prompt , str ):
            batch_size = 1
        elif isinstance(prompt , list ):
            batch_size = len(prompt )
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt )}" )
        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        # classifier-free guidance doubles the batch: one unconditional and one conditional pass
        do_classifier_free_guidance = guidance_scale > 1.0
        prompt_embeds , text_encoder_hidden_states , _ = self._encode_prompt(
            prompt , device , num_images_per_prompt , do_classifier_free_guidance , negative_prompt )
        if isinstance(image_embeds , list ):
            image_embeds = torch.cat(image_embeds , dim=0 )
        if isinstance(negative_image_embeds , list ):
            negative_image_embeds = torch.cat(negative_image_embeds , dim=0 )
        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt , dim=0 )
            image_embeds = torch.cat([negative_image_embeds, image_embeds] , dim=0 ).to(
                dtype=prompt_embeds.dtype , device=device )
        self.scheduler.set_timesteps(num_inference_steps , device=device )
        timesteps_tensor = self.scheduler.timesteps
        num_channels_latents = self.unet.config.in_channels
        height , width = get_new_h_w(height , width , self.movq_scale_factor )
        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width) , text_encoder_hidden_states.dtype , device , generator , latents , self.scheduler , )
        for i, t in enumerate(self.progress_bar(timesteps_tensor ) ):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
            added_cond_kwargs = {'text_embeds': prompt_embeds, 'image_embeds': image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input , timestep=t , encoder_hidden_states=text_encoder_hidden_states , added_cond_kwargs=added_cond_kwargs , return_dict=False , )[0]
            if do_classifier_free_guidance:
                # the UNet stacks the noise prediction and the learned variance along the channel axis
                noise_pred , variance_pred = noise_pred.split(latents.shape[1] , dim=1 )
                noise_pred_uncond , noise_pred_text = noise_pred.chunk(2 )
                _ , variance_pred_text = variance_pred.chunk(2 )
                # guidance: push the prediction away from the unconditional branch
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text] , dim=1 )
            if not (
                hasattr(self.scheduler.config , 'variance_type' )
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred , _ = noise_pred.split(latents.shape[1] , dim=1 )
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred , t , latents , generator=generator , ).prev_sample
        # post-processing
        image = self.movq.decode(latents , force_not_quantize=True )['sample']
        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}" )
        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0 , 1 )
            image = image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
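# Usage sketch (the checkpoint names and the prior call below are illustrative
# assumptions, not part of this file):
# prior = KandinskyPriorPipeline.from_pretrained('kandinsky-community/kandinsky-2-1-prior')
# image_embeds, negative_image_embeds = prior('a red cat').to_tuple()
# pipe = KandinskyPipeline.from_pretrained('kandinsky-community/kandinsky-2-1')
# image = pipe('a red cat', image_embeds=image_embeds, negative_image_embeds=negative_image_embeds).images[0]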
"""simple docstring"""
from __future__ import annotations
import sys
from collections import deque
from typing import Generic, TypeVar
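# A fixed-capacity least-recently-used (LRU) cache: a deque keeps keys in recency
# order while a set gives O(1) membership checks.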
T = TypeVar("""T""")
class LRUCache(Generic[T]):
    dq_store : deque[T]  # Cache store of keys
    key_reference : set[T]  # References of the keys in cache
    _MAX_CAPACITY : int = 1_0  # Maximum capacity of cache
    def __init__( self , n ):
        self.dq_store = deque()
        self.key_reference = set()
        if not n:
            LRUCache._MAX_CAPACITY = sys.maxsize
        elif n < 0:
            raise ValueError('n should be an integer greater than 0.' )
        else:
            LRUCache._MAX_CAPACITY = n
    def refer( self , x ):
        if x not in self.key_reference:
            if len(self.dq_store ) == LRUCache._MAX_CAPACITY:
                # evict the least recently used key (right end of the deque)
                last_element = self.dq_store.pop()
                self.key_reference.remove(last_element )
        else:
            self.dq_store.remove(x )
        self.dq_store.appendleft(x )
        self.key_reference.add(x )
    def display( self ):
        for k in self.dq_store:
            print(k )
def __repr__( self ):
return f"LRUCache({self._MAX_CAPACITY}) => {list(self.dq_store )}"
if __name__ == "__main__":
import doctest
doctest.testmod()
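    # small demo: with capacity 4, re-referring to 'A' refreshes it, so it survives
    # the later insertions of 4 and 5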
    lru_cache = LRUCache(4)
lru_cache.refer("""A""")
lru_cache.refer(2)
lru_cache.refer(3)
lru_cache.refer("""A""")
lru_cache.refer(4)
lru_cache.refer(5)
lru_cache.display()
print(lru_cache)
    assert str(lru_cache) == "LRUCache(4) => [5, 4, 'A', 3]"
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class BarthezTokenizationTest(TokenizerTesterMixin , unittest.TestCase):
    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self ):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        # save twice so both the fast and the legacy (sentencepiece) serializations are on disk
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer
def __lowerCamelCase ( self ):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
def __lowerCamelCase ( self ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 10_11_22 )
def __lowerCamelCase ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 10_11_22 )
@require_torch
def __lowerCamelCase ( self ):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 30_18, 7_03_07, 91, 2]
        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )
        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )
def __lowerCamelCase ( self ):
if not self.test_rust_tokenizer:
return
        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()
        sequence = 'I was born in 92000, and this is falsé.'
        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )
        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )
@slow
def __lowerCamelCase ( self ):
# fmt: off
__lowerCAmelCase : str = {'input_ids': [[0, 4_90, 1_43_28, 45_07, 3_54, 47, 4_36_69, 95, 25, 7_81_17, 2_02_15, 1_97_79, 1_90, 22, 4_00, 4, 3_53_43, 8_03_10, 6_03, 86, 2_49_37, 1_05, 3_34_38, 9_47_62, 1_96, 3_96_42, 7, 15, 1_59_33, 1_73, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 1_05_34, 87, 25, 66, 33_58, 1_96, 5_52_89, 8, 8_29_61, 81, 22_04, 7_52_03, 7, 15, 7_63, 1_29_56, 2_16, 1_78, 1_43_28, 95_95, 13_77, 6_96_93, 7, 4_48, 7_10_21, 1_96, 1_81_06, 14_37, 1_39_74, 1_08, 90_83, 4, 4_93_15, 7, 39, 86, 13_26, 27_93, 4_63_33, 4, 4_48, 1_96, 7_45_88, 7, 4_93_15, 7, 39, 21, 8_22, 3_84_70, 74, 21, 6_67_23, 6_24_80, 8, 2_20_50, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
__lowerCAmelCase : Union[str, Any] = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
            expected_encoding=_SCREAMING_SNAKE_CASE , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=_SCREAMING_SNAKE_CASE , )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_deberta""": ["""DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP""", """DebertaConfig""", """DebertaOnnxConfig"""],
"""tokenization_deberta""": ["""DebertaTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_deberta_fast"""] = ["""DebertaTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_deberta"""] = [
"""DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""DebertaForMaskedLM""",
"""DebertaForQuestionAnswering""",
"""DebertaForSequenceClassification""",
"""DebertaForTokenClassification""",
"""DebertaModel""",
"""DebertaPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_deberta"""] = [
"""TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFDebertaForMaskedLM""",
"""TFDebertaForQuestionAnswering""",
"""TFDebertaForSequenceClassification""",
"""TFDebertaForTokenClassification""",
"""TFDebertaModel""",
"""TFDebertaPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_deberta import DEBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, DebertaConfig, DebertaOnnxConfig
from .tokenization_deberta import DebertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_deberta_fast import DebertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_deberta import (
DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
DebertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_deberta import (
TF_DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFDebertaForMaskedLM,
TFDebertaForQuestionAnswering,
TFDebertaForSequenceClassification,
TFDebertaForTokenClassification,
TFDebertaModel,
TFDebertaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
POOLFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""sail/poolformer_s12""": """https://huggingface.co/sail/poolformer_s12/resolve/main/config.json""",
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
}
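# PoolFormer swaps self-attention for simple average pooling as the token mixer;
# the config below mirrors its staged, Swin-like layout.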
class PoolFormerConfig(PretrainedConfig):
    model_type = 'poolformer'
    def __init__( self , num_channels=3 , patch_size=16 , stride=16 , pool_size=3 , mlp_ratio=4.0 , depths=[2, 2, 6, 2] , hidden_sizes=[64, 1_28, 3_20, 5_12] , patch_sizes=[7, 3, 3, 3] , strides=[4, 2, 2, 2] , padding=[2, 1, 1, 1] , num_encoder_blocks=4 , drop_path_rate=0.0 , hidden_act="gelu" , use_layer_scale=True , layer_scale_init_value=1E-5 , initializer_range=0.02 , **kwargs , ):
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.stride = stride
        self.padding = padding
        self.pool_size = pool_size
        self.hidden_sizes = hidden_sizes
        self.mlp_ratio = mlp_ratio
        self.depths = depths
        self.patch_sizes = patch_sizes
        self.strides = strides
        self.num_encoder_blocks = num_encoder_blocks
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_layer_scale = use_layer_scale
        self.layer_scale_init_value = layer_scale_init_value
        self.initializer_range = initializer_range
        super().__init__(**kwargs )
class PoolFormerOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')
@property
def __lowerCamelCase ( self ):
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCamelCase ( self ):
        return 2E-3
"""simple docstring"""
import torch
from diffusers import UnCLIPScheduler
from .test_schedulers import SchedulerCommonTest
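# UnCLIPScheduler supports a fixed 'fixed_small_log' variance and a model-predicted
# 'learned_range' variance; both paths are exercised below.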
class UnCLIPSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (UnCLIPScheduler,)
    def get_scheduler_config( self , **kwargs ):
        config = {
            'num_train_timesteps': 10_00,
            'variance_type': 'fixed_small_log',
            'clip_sample': True,
            'clip_sample_range': 1.0,
            'prediction_type': 'epsilon',
        }
        config.update(**kwargs )
        return config
def __lowerCamelCase ( self ):
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for variance in ["fixed_small_log", "learned_range"]:
self.check_over_configs(variance_type=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for clip_sample_range in [1, 5, 10, 20]:
self.check_over_configs(clip_sample_range=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(prediction_type=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for time_step in [0, 5_00, 9_99]:
for prev_timestep in [None, 5, 1_00, 2_50, 5_00, 7_50]:
if prev_timestep is not None and prev_timestep >= time_step:
continue
self.check_over_forward(time_step=_SCREAMING_SNAKE_CASE , prev_timestep=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='fixed_small_log' )
        scheduler = scheduler_class(**scheduler_config )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 1.00_00E-10 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.054_9625 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.999_4987 ) ) < 1E-5
def __lowerCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(variance_type='learned_range' )
        scheduler = scheduler_class(**scheduler_config )
        predicted_variance = 0.5
        assert scheduler._get_variance(1 , predicted_variance=predicted_variance ) - -10.171_2790 < 1E-5
        assert scheduler._get_variance(4_87 , predicted_variance=predicted_variance ) - -5.799_8052 < 1E-5
        assert scheduler._get_variance(9_99 , predicted_variance=predicted_variance ) - -0.001_0011 < 1E-5
def __lowerCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual , t , sample , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 252.268_2495 ) < 1E-2
assert abs(result_mean.item() - 0.328_4743 ) < 1E-3
def __lowerCamelCase ( self ):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config )
        scheduler.set_timesteps(25 )
        timesteps = scheduler.timesteps
        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0 )
        for i, t in enumerate(timesteps ):
            # 1. predict noise residual
            residual = model(sample , t )
            if i + 1 == timesteps.shape[0]:
                prev_timestep = None
            else:
                prev_timestep = timesteps[i + 1]
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(
                residual , t , sample , prev_timestep=prev_timestep , generator=generator ).prev_sample
            sample = pred_prev_sample
        result_sum = torch.sum(torch.abs(sample ) )
        result_mean = torch.mean(torch.abs(sample ) )
assert abs(result_sum.item() - 258.204_4983 ) < 1E-2
assert abs(result_mean.item() - 0.336_2038 ) < 1E-3
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
        pass
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
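# deterministic kernels keep the image-slice assertions in these tests reproducible across runs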
class DiTPipelineFastTests(PipelineTesterMixin , unittest.TestCase):
    pipeline_class = DiTPipeline
    params = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {
'latents',
'num_images_per_prompt',
'callback',
'callback_steps',
}
    batch_params = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
A_ : Tuple = False
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=10_00 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def __lowerCamelCase ( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        image_slice = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 16, 16, 3) )
        expected_slice = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
        max_diff = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(max_diff , 1E-3 )
def __lowerCamelCase ( self ):
        self._test_inference_batch_single_identical(relax_max_difference=True , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCamelCase ( self ):
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class DiTPipelineIntegrationTests(unittest.TestCase):
    def tearDown( self ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
        generator = torch.manual_seed(0 )
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
        pipe.to('cuda' )
        words = ['vase', 'umbrella', 'white shark', 'white wolf']
        ids = pipe.get_label_ids(words )
        images = pipe(ids , generator=generator , num_inference_steps=40 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
f"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy" )
assert np.abs((expected_image - image).max() ) < 1E-2
def __lowerCamelCase ( self ):
        pipe = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
        pipe.to('cuda' )
        words = ['vase', 'umbrella']
        ids = pipe.get_label_ids(words )
        generator = torch.manual_seed(0 )
        images = pipe(ids , generator=generator , num_inference_steps=25 , output_type='np' ).images
        for word, image in zip(words , images ):
            expected_image = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f"/dit/{word}_512.npy" )
            assert np.abs((expected_image - image).max() ) < 1E-1
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
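# rotary position embeddings rotate query/key pairs by position-dependent sin/cos
# phases; the constants in the tests below pin that behaviour down numerically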
class TFRoFormerModelTester:
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_12 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 5_12
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None
def __lowerCamelCase ( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = RoFormerConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFRoFormerModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : str = [input_ids, input_mask]
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Any = TFRoFormerForCausalLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )['logits']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) , [self.batch_size, self.seq_length, self.vocab_size] )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFRoFormerForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_labels
__lowerCAmelCase : Optional[int] = TFRoFormerForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = self.num_choices
__lowerCAmelCase : str = TFRoFormerForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : int = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : List[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : List[Any] = TFRoFormerForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = TFRoFormerForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class TFRoFormerModelTest(TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase):
    all_model_classes = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_head_masking = False
    test_onnx = False
    def is_pipeline_test_to_skip( self , pipeline_test_case_name , config_class , model_architecture , tokenizer_name , processor_name ):
        if pipeline_test_case_name == "TextGenerationPipelineTests":
            return True
        return False
def __lowerCamelCase ( self ):
        self.model_tester = TFRoFormerModelTester(self )
        self.config_tester = ConfigTester(self , config_class=RoFormerConfig , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_lm_head(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def __lowerCamelCase ( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
def __lowerCamelCase ( self ):
        model = TFRoFormerModel.from_pretrained('junnyu/roformer_chinese_base' )
        self.assertIsNotNone(model )
@require_tf
class TFRoFormerModelIntegrationTest(unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
        model = TFRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        # TODO Replace vocab size
        vocab_size = 5_00_00
        expected_shape = [1, 6, vocab_size]
        self.assertEqual(output.shape , expected_shape )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
        expected_slice = tf.constant(
[
[
[-0.1205_3341, -1.026_4901, 0.2922_1946],
[-1.513_3783, 0.19_7433, 0.1519_0607],
[-5.013_5403, -3.90_0256, -0.8403_8764],
]
] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
@require_tf
class TFRoFormerSinusoidalPositionalEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def __lowerCamelCase ( self ):
        input_ids = tf.constant([[4, 10]] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 , embedding_dim=6 )
        emb1 = emba(input_ids.shape )
        desired_weights = tf.constant(
            [[0.0000, 0.0000, 0.0000, 1.0000, 1.0000, 1.0000], [0.8415, 0.0464, 0.0022, 0.5403, 0.9989, 1.0000]] )
        tf.debugging.assert_near(emb1 , desired_weights , atol=self.tolerance )
def __lowerCamelCase ( self ):
        desired_weights = tf.constant(
            [
                [0.0000, 0.0000, 0.0000, 0.0000, 0.0000],
                [0.8415, 0.8219, 0.8020, 0.7819, 0.7617],
                [0.9093, 0.9364, 0.9581, 0.9749, 0.9870],
            ] )
        emba = TFRoFormerSinusoidalPositionalEmbedding(num_positions=5_12 , embedding_dim=5_12 )
        emba([2, 16, 5_12] )
        weights = emba.weight[:3, :5]
        tf.debugging.assert_near(weights , desired_weights , atol=self.tolerance )
@require_tf
class TFRoFormerSelfAttentionRotaryPositionEmbeddingTest(unittest.TestCase):
    tolerance = 1e-4
def __lowerCamelCase ( self ):
# 2,12,16,64
        query_layer = tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        key_layer = -tf.reshape(tf.range(2 * 12 * 16 * 64 , dtype=tf.float32 ) , shape=(2, 12, 16, 64) ) / 1_00
        embed_positions = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 , embedding_dim=64 )
        sinusoidal_pos = embed_positions([2, 16, 7_68] )[None, None, :, :]
        query_layer , key_layer = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
            sinusoidal_pos , query_layer , key_layer )
__lowerCAmelCase : List[str] = tf.constant(
[
[0.0000, 0.0100, 0.0200, 0.0300, 0.0400, 0.0500, 0.0600, 0.0700],
[-0.2012, 0.8897, 0.0263, 0.9401, 0.2074, 0.9463, 0.3481, 0.9343],
[-1.7057, 0.6271, -1.2145, 1.3897, -0.6303, 1.7647, -0.1173, 1.8985],
[-2.1731, -1.6397, -2.7358, 0.2854, -2.1840, 1.7183, -1.3018, 2.4871],
[0.2717, -3.6173, -2.9206, -2.1988, -3.6638, 0.3858, -2.9155, 2.2980],
[3.9859, -2.1580, -0.7984, -4.4904, -4.1181, -2.0252, -4.4782, 1.1253],
] )
__lowerCAmelCase : Dict = tf.constant(
[
[0.0000, -0.0100, -0.0200, -0.0300, -0.0400, -0.0500, -0.0600, -0.0700],
[0.2012, -0.8897, -0.0263, -0.9401, -0.2074, -0.9463, -0.3481, -0.9343],
[1.7057, -0.6271, 1.2145, -1.3897, 0.6303, -1.7647, 0.1173, -1.8985],
[2.1731, 1.6397, 2.7358, -0.2854, 2.1840, -1.7183, 1.3018, -2.4871],
[-0.2717, 3.6173, 2.9206, 2.1988, 3.6638, -0.3858, 2.9155, -2.2980],
[-3.9859, 2.1580, 0.7984, 4.4904, 4.1181, 2.0252, 4.4782, -1.1253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] , _SCREAMING_SNAKE_CASE , atol=self.tolerance )
        tf.debugging.assert_near(key_layer[0, 0, :6, :8] , _SCREAMING_SNAKE_CASE , atol=self.tolerance )
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin , unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ['image']
    batch_params = ['image']
    required_optional_params = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
    test_gpu_offload = False
@property
    def text_embedder_hidden_size( self ):
return 32
@property
    def time_input_dim( self ):
return 32
@property
    def time_embed_dim( self ):
return self.time_input_dim * 4
@property
    def renderer_dim( self ):
return 8
@property
    def dummy_image_encoder( self ):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        model = CLIPVisionModel(config )
        return model
@property
    def dummy_image_processor( self ):
        image_processor = CLIPImageProcessor(
            crop_size=2_24 , do_center_crop=True , do_normalize=True , do_resize=True , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
        return image_processor
@property
    def dummy_prior( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
        model = PriorTransformer(**model_kwargs )
        return model
@property
    def dummy_renderer( self ):
        torch.manual_seed(0 )
        model_kwargs = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
        model = ShapERenderer(**model_kwargs )
        return model
    def get_dummy_components( self ):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
        components = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
    def get_dummy_inputs( self , device , seed=0 ):
        input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
        expected_slice = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def __lowerCamelCase ( self ):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device )
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]
        images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_images = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImg2ImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_images )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
"""configuration_perceiver""": ["""PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """PerceiverConfig""", """PerceiverOnnxConfig"""],
"""tokenization_perceiver""": ["""PerceiverTokenizer"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""feature_extraction_perceiver"""] = ["""PerceiverFeatureExtractor"""]
    _import_structure["""image_processing_perceiver"""] = ["""PerceiverImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_perceiver"""] = [
"""PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""PerceiverForImageClassificationConvProcessing""",
"""PerceiverForImageClassificationFourier""",
"""PerceiverForImageClassificationLearned""",
"""PerceiverForMaskedLM""",
"""PerceiverForMultimodalAutoencoding""",
"""PerceiverForOpticalFlow""",
"""PerceiverForSequenceClassification""",
"""PerceiverLayer""",
"""PerceiverModel""",
"""PerceiverPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"""configuration_fnet""": ["""FNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet"""] = ["""FNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_fnet_fast"""] = ["""FNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_fnet"""] = [
"""FNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FNetForMaskedLM""",
"""FNetForMultipleChoice""",
"""FNetForNextSentencePrediction""",
"""FNetForPreTraining""",
"""FNetForQuestionAnswering""",
"""FNetForSequenceClassification""",
"""FNetForTokenClassification""",
"""FNetLayer""",
"""FNetModel""",
"""FNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
"""simple docstring"""
from io import BytesIO
from typing import List, Union
import requests
from ..utils import add_end_docstrings, is_decord_available, is_torch_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_decord_available():
import numpy as np
from decord import VideoReader
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class VideoClassificationPipeline(Pipeline):
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'decord' )
        self.check_model_type(MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING )
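    # decord gives fast random-access frame decoding; the hooks below wire it into
    # the standard preprocess -> _forward -> postprocess pipeline flow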
    def _sanitize_parameters( self , top_k=None , num_frames=None , frame_sampling_rate=None ):
        preprocess_params = {}
        if frame_sampling_rate is not None:
            preprocess_params['frame_sampling_rate'] = frame_sampling_rate
        if num_frames is not None:
            preprocess_params['num_frames'] = num_frames
        postprocess_params = {}
        if top_k is not None:
            postprocess_params['top_k'] = top_k
        return preprocess_params, {}, postprocess_params
    def __call__( self , videos , **kwargs ):
        return super().__call__(videos , **kwargs )
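    # preprocess: fetch/decode the video, sample num_frames frames at the requested
    # rate, and batch them through the image processor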
    def preprocess( self , video , num_frames=None , frame_sampling_rate=1 ):
        if num_frames is None:
            num_frames = self.model.config.num_frames
        if video.startswith('http://' ) or video.startswith('https://' ):
            video = BytesIO(requests.get(video ).content )
        videoreader = VideoReader(video )
        videoreader.seek(0 )
        start_idx = 0
        end_idx = num_frames * frame_sampling_rate - 1
        # evenly spaced frame indices across the sampled window
        indices = np.linspace(start_idx , end_idx , num=num_frames , dtype=np.int64 )
        video = videoreader.get_batch(indices ).asnumpy()
        video = list(video )
        model_inputs = self.image_processor(video , return_tensors=self.framework )
return model_inputs
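    # _forward runs the prepared pixel batch through the model; postprocess then
    # turns the logits into (score, label) pairs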
    def _forward( self , model_inputs ):
        model_outputs = self.model(**model_inputs )
        return model_outputs
    def postprocess( self , model_outputs , top_k=5 ):
        if top_k > self.model.config.num_labels:
            top_k = self.model.config.num_labels
        if self.framework == "pt":
            probs = model_outputs.logits.softmax(-1 )[0]
            scores , ids = probs.topk(top_k )
        else:
            raise ValueError(f"Unsupported framework: {self.framework}" )
        scores = scores.tolist()
        ids = ids.tolist()
        return [{"score": score, "label": self.model.config.id2label[_id]} for score, _id in zip(scores , ids )]
"""simple docstring"""
import math
import sys
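# computes the minimum number of perfect squares that sum to `number`
# (by Lagrange's four-square theorem the answer is at most 4)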
def __lowerCAmelCase (number ):
    if number != int(number ):
        raise ValueError('the value of input must be a natural number' )
    if number < 0:
        raise ValueError('the value of input must not be a negative number' )
    if number == 0:
        return 1
    answers = [-1] * (number + 1)
    answers[0] = 0
    for i in range(1 , number + 1 ):
        # fewest squares summing to i: try every square j**2 <= i and reuse smaller answers
        answer = sys.maxsize
        root = int(math.sqrt(i ) )
        for j in range(1 , root + 1 ):
            current_answer = 1 + answers[i - (j**2)]
            answer = min(answer , current_answer )
        answers[i] = answer
    return answers[number]
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
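# the tester below builds a deliberately tiny Swin configuration so the shape
# checks run quickly on CPU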
class MaskFormerSwinModelTester:
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=[1, 2, 1] , _SCREAMING_SNAKE_CASE=[2, 2, 4] , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2.0 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.0 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-5 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=10 , _SCREAMING_SNAKE_CASE=8 , _SCREAMING_SNAKE_CASE=["stage1", "stage2", "stage3"] , _SCREAMING_SNAKE_CASE=[1, 2, 3] , ):
__lowerCAmelCase : List[str] = parent
__lowerCAmelCase : Tuple = batch_size
__lowerCAmelCase : str = image_size
__lowerCAmelCase : Any = patch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : str = embed_dim
__lowerCAmelCase : Optional[Any] = depths
__lowerCAmelCase : Tuple = num_heads
__lowerCAmelCase : int = window_size
__lowerCAmelCase : List[str] = mlp_ratio
__lowerCAmelCase : Tuple = qkv_bias
__lowerCAmelCase : Union[str, Any] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : str = drop_path_rate
__lowerCAmelCase : Any = hidden_act
__lowerCAmelCase : Any = use_absolute_embeddings
__lowerCAmelCase : Dict = patch_norm
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : Any = initializer_range
__lowerCAmelCase : Tuple = is_training
__lowerCAmelCase : Tuple = scope
__lowerCAmelCase : Dict = use_labels
__lowerCAmelCase : Tuple = type_sequence_label_size
__lowerCAmelCase : List[str] = encoder_stride
__lowerCAmelCase : int = out_features
__lowerCAmelCase : List[str] = out_indices
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        config = self.get_config()
return config, pixel_values, labels
    def get_config( self ):
return MaskFormerSwinConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MaskFormerSwinModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )
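# Each of the len(depths) - 1 patch-merging stages shrinks the token count by 4x and doubles
# the channel dim, hence the expected sequence length and hidden size computed below.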
__lowerCAmelCase : Optional[int] = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__lowerCAmelCase : Union[str, Any] = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(_SCREAMING_SNAKE_CASE )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , [16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = ['stem']
__lowerCAmelCase : Any = MaskFormerSwinBackbone(config=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = config_and_inputs
__lowerCAmelCase : Dict = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[Any] = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
A_ : str = False
A_ : Union[str, Any] = False
A_ : Optional[Any] = False
A_ : Optional[int] = False
A_ : Optional[int] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = MaskFormerSwinModelTester(self )
__lowerCAmelCase : Optional[int] = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
'`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn\'t work well with'
' `nn.DataParallel`'
) )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def __lowerCamelCase ( self ):
return
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*_SCREAMING_SNAKE_CASE )
@unittest.skip('Swin does not use inputs_embeds' )
def __lowerCamelCase ( self ):
pass
@unittest.skip('Swin does not support feedforward chunking' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = model_class(_SCREAMING_SNAKE_CASE )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
__lowerCAmelCase : Any = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_SCREAMING_SNAKE_CASE , nn.Linear ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__lowerCAmelCase : Union[str, Any] = [*signature.parameters.keys()]
__lowerCAmelCase : Optional[int] = ['pixel_values']
self.assertListEqual(arg_names[:1] , _SCREAMING_SNAKE_CASE )
@unittest.skip(reason='MaskFormerSwin is only used as backbone and doesn\'t support output_attentions' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='MaskFormerSwin is only used as an internal backbone' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
with torch.no_grad():
__lowerCAmelCase : str = model(**self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Dict = outputs.hidden_states
__lowerCAmelCase : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
# Swin has a different seq_length
__lowerCAmelCase : Dict = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Optional[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = 3
__lowerCAmelCase : Union[str, Any] = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__lowerCAmelCase : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__lowerCAmelCase : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__lowerCAmelCase : Optional[Any] = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__lowerCAmelCase : Optional[Any] = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__lowerCAmelCase : Any = True
self.check_hidden_states_output(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , (padded_height, padded_width) )
@unittest.skip(reason='MaskFormerSwin doesn\'t have pretrained checkpoints' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
@unittest.skip(reason='This will be fixed once MaskFormerSwin is replaced by native Swin' )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
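# Checks that outputs are identical whether the model returns a tuple or a ModelOutput dict:
# leaves are compared recursively with torch.allclose after NaNs are zeroed out.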
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(t ):
t[t != t] = 0  # NaN != NaN, so this zeroes every NaN entry in-place
return t
def check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE={} ):
with torch.no_grad():
__lowerCAmelCase : str = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(**_SCREAMING_SNAKE_CASE , return_dict=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ).to_tuple()
def recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if isinstance(_SCREAMING_SNAKE_CASE , (List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() , dict_object.values() ):
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , set_nan_tensor_to_zero(_SCREAMING_SNAKE_CASE ) , atol=1E-5 ) , msg=(
'Tuple and dict output are not equal. Difference:'
f" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"
f" {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}. Dict has"
f" `nan`: {torch.isnan(_SCREAMING_SNAKE_CASE ).any()} and `inf`: {torch.isinf(_SCREAMING_SNAKE_CASE )}."
) , )
recursive_check(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : int = model_class(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
__lowerCAmelCase : Optional[int] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
check_equivalence(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , {'output_hidden_states': True} )
@require_torch
class A__ ( unittest.TestCase , _lowerCamelCase):
A_ : Tuple = (MaskFormerSwinBackbone,) if is_torch_available() else ()
A_ : Union[str, Any] = MaskFormerSwinConfig
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = MaskFormerSwinModelTester(self )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = inputs_dict['pixel_values'].shape[0]
for backbone_class in self.all_model_classes:
__lowerCAmelCase : int = backbone_class(_SCREAMING_SNAKE_CASE )
backbone.to(_SCREAMING_SNAKE_CASE )
backbone.eval()
__lowerCAmelCase : Union[str, Any] = backbone(**_SCREAMING_SNAKE_CASE )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps , _SCREAMING_SNAKE_CASE )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps , backbone.channels ):
self.assertTrue(feature_map.shape[:2] , (batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__lowerCAmelCase : Tuple = backbone(**_SCREAMING_SNAKE_CASE , output_hidden_states=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) , len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] , backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Optional[int] = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) , (batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__lowerCAmelCase : str = backbone(**_SCREAMING_SNAKE_CASE , output_attentions=_SCREAMING_SNAKE_CASE )
self.assertIsNotNone(outputs.attentions )
"""simple docstring"""
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=14 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=0.02 , ):
__lowerCAmelCase : Union[str, Any] = parent
__lowerCAmelCase : Any = batch_size
__lowerCAmelCase : Any = seq_length
__lowerCAmelCase : Optional[Any] = is_training
__lowerCAmelCase : Any = use_input_mask
__lowerCAmelCase : Any = use_token_type_ids
__lowerCAmelCase : Tuple = use_labels
__lowerCAmelCase : Optional[Any] = vocab_size
__lowerCAmelCase : Tuple = hidden_size
__lowerCAmelCase : str = rotary_dim
__lowerCAmelCase : Union[str, Any] = num_hidden_layers
__lowerCAmelCase : Union[str, Any] = num_attention_heads
__lowerCAmelCase : int = intermediate_size
__lowerCAmelCase : List[str] = hidden_act
__lowerCAmelCase : int = hidden_dropout_prob
__lowerCAmelCase : Any = attention_probs_dropout_prob
__lowerCAmelCase : List[Any] = max_position_embeddings
__lowerCAmelCase : Optional[Any] = initializer_range
__lowerCAmelCase : Tuple = None
__lowerCAmelCase : int = vocab_size - 1
__lowerCAmelCase : Dict = vocab_size - 1
__lowerCAmelCase : int = vocab_size - 1
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : List[str] = None
if self.use_input_mask:
__lowerCAmelCase : Dict = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Optional[int] = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=_SCREAMING_SNAKE_CASE , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Any = config_and_inputs
__lowerCAmelCase : Dict = {'input_ids': input_ids, 'attention_mask': attention_mask}
return config, inputs_dict
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
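# Verifies incremental decoding: generating one token at a time through init_cache /
# past_key_values must match a single full forward pass on the final-token logits.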
__lowerCAmelCase : List[str] = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype='i4' )
__lowerCAmelCase : Optional[Any] = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Any = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : int = model(
input_ids[:, -1:] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=outputs_cache.past_key_values , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
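# Same cache-equivalence check as above, but with an attention mask zero-padded out to the
# maximum decoder length.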
__lowerCAmelCase : Tuple = 20
__lowerCAmelCase : List[str] = model_class_name(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
__lowerCAmelCase : List[str] = model.init_cache(input_ids.shape[0] , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
__lowerCAmelCase : Optional[Any] = model(
input_ids[:, :-1] , attention_mask=_SCREAMING_SNAKE_CASE , past_key_values=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : str = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype='i4' )
__lowerCAmelCase : Tuple = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=_SCREAMING_SNAKE_CASE , position_ids=_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1E-3 , msg=f"Max diff is {diff}" )
@require_flax
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : Tuple = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
A_ : str = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = FlaxGPTJModelTester(self )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@tooslow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = GPTaTokenizer.from_pretrained('gpt2' , pad_token='<|endoftext|>' , padding_side='left' )
__lowerCAmelCase : Optional[int] = tokenizer(['Hello this is a long string', 'Hey'] , return_tensors='np' , padding=_SCREAMING_SNAKE_CASE , truncation=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = FlaxGPTJForCausalLM.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : Any = False
__lowerCAmelCase : Any = model.config.eos_token_id
__lowerCAmelCase : Union[str, Any] = jax.jit(model.generate )
__lowerCAmelCase : Optional[Any] = jit_generate(
inputs['input_ids'] , attention_mask=inputs['attention_mask'] , pad_token_id=tokenizer.pad_token_id ).sequences
__lowerCAmelCase : str = tokenizer.batch_decode(_SCREAMING_SNAKE_CASE , skip_special_tokens=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = [
'Hello this is a long string of text.\n\nI\'m trying to get the text of the',
'Hey, I\'m a little late to the party. I\'m going to',
]
self.assertListEqual(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : Optional[int] = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase , __lowerCAmelCase : List[Any] = pt_inputs['input_ids'].shape
__lowerCAmelCase : int = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
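# The four assignments below randomize per-example padding in the attention masks of both
# the PyTorch and Flax input dicts (0 = masked, 1 = attended), so PT/Flax equivalence is
# also exercised in the presence of padding.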
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = 0
__lowerCAmelCase : Tuple = 1
__lowerCAmelCase : List[str] = 0
__lowerCAmelCase : Any = 1
__lowerCAmelCase : Optional[Any] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : int = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = fx_state
with torch.no_grad():
__lowerCAmelCase : Union[str, Any] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : str = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_pt=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = fx_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output_loaded, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@is_pt_flax_cross_test
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
__lowerCAmelCase : List[str] = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
__lowerCAmelCase : Dict = model_class.__name__[4:] # Skip the "Flax" at the beginning
__lowerCAmelCase : str = getattr(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pt_model_class(_SCREAMING_SNAKE_CASE ).eval()
__lowerCAmelCase : Tuple = model_class(_SCREAMING_SNAKE_CASE , dtype=jnp.floataa )
__lowerCAmelCase : List[str] = load_flax_weights_in_pytorch_model(_SCREAMING_SNAKE_CASE , fx_model.params )
__lowerCAmelCase , __lowerCAmelCase : int = pt_inputs['input_ids'].shape
__lowerCAmelCase : List[str] = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = 0
__lowerCAmelCase : Optional[Any] = 1
__lowerCAmelCase : Optional[int] = 0
__lowerCAmelCase : Optional[Any] = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
__lowerCAmelCase : List[str] = pt_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
__lowerCAmelCase : Optional[int] = fx_model(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = pt_model_class.from_pretrained(_SCREAMING_SNAKE_CASE , from_flax=_SCREAMING_SNAKE_CASE )
with torch.no_grad():
__lowerCAmelCase : Any = pt_model_loaded(**_SCREAMING_SNAKE_CASE ).to_tuple()
self.assertEqual(
len(_SCREAMING_SNAKE_CASE ) , len(_SCREAMING_SNAKE_CASE ) , 'Output lengths differ between Flax and PyTorch' )
for fx_output, pt_output in zip(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4E-2 )
@tooslow
def __lowerCamelCase ( self ):
for model_class_name in self.all_model_classes:
__lowerCAmelCase : Optional[int] = model_class_name.from_pretrained('EleutherAI/gpt-j-6B' )
__lowerCAmelCase : List[Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
"""simple docstring"""
from __future__ import annotations
import os
from typing import Any
import requests
lowerCamelCase__ = """https://api.github.com"""
# https://docs.github.com/en/free-pro-team@latest/rest/reference/users#get-the-authenticated-user
lowerCamelCase__ = BASE_URL + """/user"""
# https://github.com/settings/tokens
lowerCamelCase__ = os.environ.get("""USER_TOKEN""", """""")
def __lowerCAmelCase (_UpperCamelCase ):
__lowerCAmelCase : Optional[int] = {
'Authorization': F"token {auth_token}",
'Accept': 'application/vnd.github.v3+json',
}
return requests.get(_UpperCamelCase , headers=_UpperCamelCase ).json()
if __name__ == "__main__": # pragma: no cover
if USER_TOKEN:
for key, value in fetch_github_info(USER_TOKEN).items():
print(f'{key}: {value}')
else:
raise ValueError("""'USER_TOKEN' field cannot be empty.""")
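# Example invocation (hypothetical script name; the token value is a placeholder):
#   USER_TOKEN=<personal-access-token> python fetch_github_info.py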
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Tuple = parent
__lowerCAmelCase : Optional[int] = 13
__lowerCAmelCase : List[Any] = 7
__lowerCAmelCase : int = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : List[Any] = True
__lowerCAmelCase : Optional[int] = True
__lowerCAmelCase : Optional[Any] = 99
__lowerCAmelCase : int = 3_84
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : str = 37
__lowerCAmelCase : Any = 'gelu'
__lowerCAmelCase : List[str] = 0.1
__lowerCAmelCase : Any = 0.1
__lowerCAmelCase : Union[str, Any] = 5_12
__lowerCAmelCase : int = 16
__lowerCAmelCase : Union[str, Any] = 2
__lowerCAmelCase : int = 0.02
__lowerCAmelCase : Dict = 3
__lowerCAmelCase : Tuple = 4
__lowerCAmelCase : Tuple = 1_28
__lowerCAmelCase : Optional[int] = 2
__lowerCAmelCase : List[str] = 9
__lowerCAmelCase : int = 1
__lowerCAmelCase : int = None
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : str = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : Tuple = None
if self.use_token_type_ids:
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Optional[Any] = None
__lowerCAmelCase : Dict = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : List[Any] = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : Union[str, Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_SCREAMING_SNAKE_CASE , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = TFConvBertModel(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__lowerCAmelCase : Tuple = [input_ids, input_mask]
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Any = TFConvBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = self.num_labels
__lowerCAmelCase : Optional[Any] = TFConvBertForSequenceClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : int = self.num_choices
__lowerCAmelCase : List[str] = TFConvBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Dict = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Union[str, Any] = tf.tile(tf.expand_dims(_SCREAMING_SNAKE_CASE , 1 ) , (1, self.num_choices, 1) )
__lowerCAmelCase : Tuple = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = self.num_labels
__lowerCAmelCase : Any = TFConvBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Union[str, Any] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = TFConvBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__lowerCAmelCase : Optional[int] = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.prepare_config_and_inputs()
__lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase , __lowerCAmelCase : List[str] = config_and_inputs
__lowerCAmelCase : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : List[str] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : str = (
{
'feature-extraction': TFConvBertModel,
'fill-mask': TFConvBertForMaskedLM,
'question-answering': TFConvBertForQuestionAnswering,
'text-classification': TFConvBertForSequenceClassification,
'token-classification': TFConvBertForTokenClassification,
'zero-shot': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : List[Any] = False
A_ : str = False
A_ : List[Any] = False
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModelTester(self )
__lowerCAmelCase : Any = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_SCREAMING_SNAKE_CASE )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = True
if hasattr(_SCREAMING_SNAKE_CASE , 'use_cache' ):
__lowerCAmelCase : int = True
__lowerCAmelCase : List[str] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
for model_class in self.all_model_classes:
__lowerCAmelCase : str = self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = len(model(_SCREAMING_SNAKE_CASE ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_SCREAMING_SNAKE_CASE , saved_model=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = os.path.join(_SCREAMING_SNAKE_CASE , 'saved_model' , '1' )
__lowerCAmelCase : int = tf.keras.models.load_model(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : List[str] = outputs['encoder_hidden_states']
__lowerCAmelCase : Tuple = outputs['encoder_attentions']
else:
__lowerCAmelCase : Optional[int] = outputs['hidden_states']
__lowerCAmelCase : Tuple = outputs['attentions']
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , _SCREAMING_SNAKE_CASE )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Tuple = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase , __lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__lowerCAmelCase : Tuple = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = getattr(self.model_tester , 'key_length' , _SCREAMING_SNAKE_CASE )
def check_decoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(out_len % 2 , 0 )
__lowerCAmelCase : Optional[Any] = outputs.decoder_attentions
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_SCREAMING_SNAKE_CASE ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__lowerCAmelCase : List[str] = True
__lowerCAmelCase : Optional[int] = False
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Tuple = len(_SCREAMING_SNAKE_CASE )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
if self.is_encoder_decoder:
__lowerCAmelCase : Any = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_decoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : str = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
# Check attention is always last and order is fine
__lowerCAmelCase : Dict = True
__lowerCAmelCase : Optional[Any] = True
__lowerCAmelCase : List[Any] = model_class(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = model(self._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_SCREAMING_SNAKE_CASE ) )
self.assertEqual(model.config.output_hidden_states , _SCREAMING_SNAKE_CASE )
check_encoder_attentions_output(_SCREAMING_SNAKE_CASE )
@require_tf
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__lowerCAmelCase : int = tf.constant([[0, 1, 2, 3, 4, 5]] )
__lowerCAmelCase : Tuple = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Tuple = [1, 6, 7_68]
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 )
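# --- A minimal, hedged usage sketch (not part of the test file above) ---
# The checkpoint name and hidden size (768) come from the slow tests; that AutoTokenizer
# resolves for this checkpoint is an assumption.
from transformers import AutoTokenizer, TFConvBertModel

tokenizer = AutoTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
inputs = tokenizer("Hello ConvBERT", return_tensors="tf")
outputs = model(inputs)
print(outputs.last_hidden_state.shape)  # (1, seq_len, 768)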
"""simple docstring"""
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class A__ ( _lowerCamelCase , _lowerCamelCase):
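# Maps CLIP image/text embeddings into (a) an additive time embedding and (b) extra context
# tokens for the unCLIP decoder, following the paper quotes embedded in the forward method below.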
@register_to_config
def __init__( self , *,
_SCREAMING_SNAKE_CASE = 4 , _SCREAMING_SNAKE_CASE = 7_68 , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , ):
super().__init__()
__lowerCAmelCase : Dict = nn.Parameter(torch.zeros(_SCREAMING_SNAKE_CASE ) )
# parameters for additional clip time embeddings
__lowerCAmelCase : Dict = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# parameters for encoder hidden states
__lowerCAmelCase : List[Any] = clip_extra_context_tokens
__lowerCAmelCase : Optional[Any] = nn.Linear(
_SCREAMING_SNAKE_CASE , self.clip_extra_context_tokens * cross_attention_dim )
__lowerCAmelCase : List[Any] = nn.Linear(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = nn.LayerNorm(_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *, _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
if do_classifier_free_guidance:
# Add the classifier free guidance embeddings to the image embeddings
__lowerCAmelCase : Tuple = image_embeddings.shape[0]
__lowerCAmelCase : int = self.learned_classifier_free_guidance_embeddings.unsqueeze(0 )
__lowerCAmelCase : int = classifier_free_guidance_embeddings.expand(
_SCREAMING_SNAKE_CASE , -1 )
__lowerCAmelCase : Optional[Any] = torch.cat([classifier_free_guidance_embeddings, image_embeddings] , dim=0 )
# The image embeddings batch size and the text embeddings batch size are equal
assert image_embeddings.shape[0] == prompt_embeds.shape[0]
__lowerCAmelCase : int = prompt_embeds.shape[0]
# "Specifically, we modify the architecture described in Nichol et al. (2021) by projecting and
# adding CLIP embeddings to the existing timestep embedding, ...
__lowerCAmelCase : Any = self.embedding_proj(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = self.clip_image_embeddings_project_to_time_embeddings(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = time_projected_image_embeddings + time_projected_prompt_embeds
# ... and by projecting CLIP embeddings into four
# extra tokens of context that are concatenated to the sequence of outputs from the GLIDE text encoder"
__lowerCAmelCase : Optional[Any] = self.clip_extra_context_tokens_proj(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = clip_extra_context_tokens.reshape(_SCREAMING_SNAKE_CASE , -1 , self.clip_extra_context_tokens )
__lowerCAmelCase : Any = clip_extra_context_tokens.permute(0 , 2 , 1 )
__lowerCAmelCase : List[Any] = self.encoder_hidden_states_proj(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = self.text_encoder_hidden_states_norm(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.cat([clip_extra_context_tokens, text_encoder_hidden_states] , dim=1 )
return text_encoder_hidden_states, additive_clip_time_embeddings
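# --- A minimal, hedged usage sketch (not part of the file above) ---
# The import path and keyword names (clip_embeddings_dim, time_embed_dim, cross_attention_dim,
# image_embeddings, prompt_embeds, ...) follow the public diffusers UnCLIPTextProjModel API and
# are assumptions here, since the obfuscated signatures above do not preserve them.
import torch
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel

proj = UnCLIPTextProjModel(
    clip_extra_context_tokens=4,
    clip_embeddings_dim=768,
    time_embed_dim=1536,       # hypothetical decoder time-embedding width
    cross_attention_dim=2048,  # hypothetical decoder cross-attention width
)
text_states, additive_time_embeds = proj(
    image_embeddings=torch.randn(2, 768),
    prompt_embeds=torch.randn(2, 768),
    text_encoder_hidden_states=torch.randn(2, 77, 768),
    do_classifier_free_guidance=False,
)
print(text_states.shape)           # (2, 4 + 77, 2048): extra context tokens prepended
print(additive_time_embeds.shape)  # (2, 1536)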
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class A__ ( unittest.TestCase):
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=30 , _SCREAMING_SNAKE_CASE=4_00 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=1 / 2_55 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=[0.5, 0.5, 0.5] , _SCREAMING_SNAKE_CASE=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
__lowerCAmelCase : Any = size if size is not None else {'shortest_edge': 18, 'longest_edge': 13_33}
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : int = batch_size
__lowerCAmelCase : str = num_channels
__lowerCAmelCase : Optional[int] = min_resolution
__lowerCAmelCase : List[Any] = max_resolution
__lowerCAmelCase : Union[str, Any] = do_resize
__lowerCAmelCase : Optional[Any] = size
__lowerCAmelCase : Dict = do_rescale
__lowerCAmelCase : Optional[Any] = rescale_factor
__lowerCAmelCase : Any = do_normalize
__lowerCAmelCase : List[str] = image_mean
__lowerCAmelCase : Union[str, Any] = image_std
__lowerCAmelCase : Optional[int] = do_pad
def __lowerCamelCase ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
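# Mirrors the processor's aspect-ratio-preserving "shortest edge" resize to compute the
# height/width the tests should expect; batched mode takes per-batch maxima (the padded size).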
if not batched:
__lowerCAmelCase : str = image_inputs[0]
if isinstance(_SCREAMING_SNAKE_CASE , Image.Image ):
__lowerCAmelCase , __lowerCAmelCase : Optional[int] = image.size
else:
__lowerCAmelCase , __lowerCAmelCase : Union[str, Any] = image.shape[1], image.shape[2]
if w < h:
__lowerCAmelCase : str = int(self.size['shortest_edge'] * h / w )
__lowerCAmelCase : Optional[int] = self.size['shortest_edge']
elif w > h:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Union[str, Any] = int(self.size['shortest_edge'] * w / h )
else:
__lowerCAmelCase : str = self.size['shortest_edge']
__lowerCAmelCase : Optional[Any] = self.size['shortest_edge']
else:
__lowerCAmelCase : str = []
for image in image_inputs:
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
__lowerCAmelCase : Any = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[0] )[0]
__lowerCAmelCase : Dict = max(_SCREAMING_SNAKE_CASE , key=lambda _SCREAMING_SNAKE_CASE : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : List[str] = DetrImageProcessor if is_vision_available() else None
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[Any] = DetrImageProcessingTester(self )
@property
def __lowerCamelCase ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_mean' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'image_std' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_normalize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_rescale' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'rescale_factor' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_resize' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'size' ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , 'do_pad' ) )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18, 'longest_edge': 13_33} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_SCREAMING_SNAKE_CASE )
self.assertEqual(image_processor.size , {'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad , _SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
pass
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input
__lowerCAmelCase : int = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase , __lowerCAmelCase : int = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Tuple = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__lowerCAmelCase : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input
__lowerCAmelCase : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Tuple = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Union[str, Any] = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : List[Any] = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def __lowerCamelCase ( self ):
# Initialize image_processing
__lowerCAmelCase : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__lowerCAmelCase : int = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input
__lowerCAmelCase : Any = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Dict = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
__lowerCAmelCase : Tuple = image_processing(_SCREAMING_SNAKE_CASE , return_tensors='pt' ).pixel_values
__lowerCAmelCase , __lowerCAmelCase : Any = self.image_processor_tester.get_expected_values(_SCREAMING_SNAKE_CASE , batched=_SCREAMING_SNAKE_CASE )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def __lowerCamelCase ( self ):
# prepare image and target
__lowerCAmelCase : Union[str, Any] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Any = json.loads(f.read() )
__lowerCAmelCase : Tuple = {'image_id': 3_97_69, 'annotations': target}
# encode them
__lowerCAmelCase : Dict = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50' )
__lowerCAmelCase : int = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : List[str] = torch.tensor([5887.9600, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : str = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : Dict = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : Union[str, Any] = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify orig_size
__lowerCAmelCase : int = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) )
@slow
def __lowerCamelCase ( self ):
# prepare image, target and masks_path
__lowerCAmelCase : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' , 'r' ) as f:
__lowerCAmelCase : Optional[int] = json.loads(f.read() )
__lowerCAmelCase : Optional[int] = {'file_name': '000000039769.png', 'image_id': 3_97_69, 'segments_info': target}
__lowerCAmelCase : Union[str, Any] = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
__lowerCAmelCase : Optional[int] = DetrImageProcessor.from_pretrained('facebook/detr-resnet-50-panoptic' )
__lowerCAmelCase : Optional[Any] = image_processing(images=_SCREAMING_SNAKE_CASE , annotations=_SCREAMING_SNAKE_CASE , masks_path=_SCREAMING_SNAKE_CASE , return_tensors='pt' )
# verify pixel values
__lowerCAmelCase : str = torch.Size([1, 3, 8_00, 10_66] )
self.assertEqual(encoding['pixel_values'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] , _SCREAMING_SNAKE_CASE , atol=1E-4 ) )
# verify area
__lowerCAmelCase : int = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] , _SCREAMING_SNAKE_CASE ) )
# verify boxes
__lowerCAmelCase : Optional[int] = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] , _SCREAMING_SNAKE_CASE , atol=1E-3 ) )
# verify image_id
__lowerCAmelCase : str = torch.tensor([3_97_69] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] , _SCREAMING_SNAKE_CASE ) )
# verify is_crowd
__lowerCAmelCase : Optional[int] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] , _SCREAMING_SNAKE_CASE ) )
# verify class_labels
__lowerCAmelCase : str = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] , _SCREAMING_SNAKE_CASE ) )
# verify masks
__lowerCAmelCase : Dict = 82_28_73
self.assertEqual(encoding['labels'][0]['masks'].sum().item() , _SCREAMING_SNAKE_CASE )
# verify orig_size
__lowerCAmelCase : str = torch.tensor([4_80, 6_40] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] , _SCREAMING_SNAKE_CASE ) )
# verify size
__lowerCAmelCase : List[Any] = torch.tensor([8_00, 10_66] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] , _SCREAMING_SNAKE_CASE ) ) | 86 | 1 |
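Aside: the expected boxes in the integration tests above are in DETR's normalized (center_x, center_y, width, height) convention rather than COCO's pixel (x_min, y_min, width, height); a minimal conversion sketch (the sample box and image size are illustrative):

def coco_xywh_to_normalized_cxcywh(box, img_w, img_h):
    # COCO annotations store [x_min, y_min, width, height] in pixels;
    # DETR-style targets rescale to [center_x, center_y, width, height] in [0, 1].
    x, y, w, h = box
    return [(x + w / 2) / img_w, (y + h / 2) / img_h, w / img_w, h / img_h]

print(coco_xywh_to_normalized_cxcywh([10, 20, 30, 40], 100, 200))  # [0.25, 0.2, 0.3, 0.2]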
"""simple docstring"""
import os
from dataclasses import dataclass, field
from io import BytesIO
from typing import TYPE_CHECKING, Any, ClassVar, Dict, Optional, Union
import numpy as np
import pyarrow as pa
from .. import config
from ..download.streaming_download_manager import xopen, xsplitext
from ..table import array_cast
from ..utils.py_utils import no_op_if_value_is_null, string_to_dict
if TYPE_CHECKING:
from .features import FeatureType
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = False, False, False
@dataclass
class A__ :
A_ : Optional[int] = None
A_ : bool = True
A_ : bool = True
A_ : Optional[str] = None
# Automatically constructed
A_ : ClassVar[str] = "dict"
A_ : ClassVar[Any] = pa.struct({'bytes': pa.binary(), 'path': pa.string()})
A_ : str = field(default='Audio' , init=_lowerCamelCase , repr=_lowerCamelCase)
def __call__( self ):
return self.pa_type
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
try:
import soundfile as sf # soundfile is a dependency of librosa, needed to decode audio files.
except ImportError as err:
raise ImportError('To support encoding audio data, please install \'soundfile\'.' ) from err
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"bytes": None, "path": value}
elif isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
return {"bytes": value, "path": None}
elif "array" in value:
# convert the audio array to wav bytes
__lowerCAmelCase : Optional[int] = BytesIO()
sf.write(_SCREAMING_SNAKE_CASE , value['array'] , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
elif value.get('path' ) is not None and os.path.isfile(value['path'] ):
# we set "bytes": None to not duplicate the data if they're already available locally
if value["path"].endswith('pcm' ):
# "PCM" only has raw audio bytes
if value.get('sampling_rate' ) is None:
# At least, If you want to convert "PCM-byte" to "WAV-byte", you have to know sampling rate
raise KeyError('To use PCM files, please specify a \'sampling_rate\' in Audio object' )
if value.get('bytes' ):
# If we already had PCM-byte, we don`t have to make "read file, make bytes" (just use it!)
__lowerCAmelCase : List[str] = np.frombuffer(value['bytes'] , dtype=np.intaa ).astype(np.floataa ) / 3_27_67
else:
__lowerCAmelCase : Optional[int] = np.memmap(value['path'] , dtype='h' , mode='r' ).astype(np.floataa ) / 3_27_67
__lowerCAmelCase : Union[str, Any] = BytesIO(bytes() )
sf.write(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , value['sampling_rate'] , format='wav' )
return {"bytes": buffer.getvalue(), "path": None}
else:
return {"bytes": None, "path": value.get('path' )}
elif value.get('bytes' ) is not None or value.get('path' ) is not None:
# store the audio bytes, and path is used to infer the audio format using the file extension
return {"bytes": value.get('bytes' ), "path": value.get('path' )}
else:
raise ValueError(
f"An audio sample should have one of 'path' or 'bytes' but they are missing or None in {value}." )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
if not self.decode:
raise RuntimeError('Decoding is disabled for this feature. Please use Audio(decode=True) instead.' )
__lowerCAmelCase , __lowerCAmelCase : str = (value['path'], BytesIO(value['bytes'] )) if value['bytes'] is not None else (value['path'], None)
if path is None and file is None:
raise ValueError(f"An audio sample should have one of 'path' or 'bytes' but both are None in {value}." )
try:
import librosa
import soundfile as sf
except ImportError as err:
raise ImportError('To support decoding audio files, please install \'librosa\' and \'soundfile\'.' ) from err
__lowerCAmelCase : int = xsplitext(_SCREAMING_SNAKE_CASE )[1][1:].lower() if path is not None else None
if not config.IS_OPUS_SUPPORTED and audio_format == "opus":
raise RuntimeError(
'Decoding \'opus\' files requires system library \'libsndfile\'>=1.0.31, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
elif not config.IS_MP3_SUPPORTED and audio_format == "mp3":
raise RuntimeError(
'Decoding \'mp3\' files requires system library \'libsndfile\'>=1.1.0, '
'You can try to update `soundfile` python library: `pip install "soundfile>=0.12.1"`. ' )
if file is None:
__lowerCAmelCase : int = token_per_repo_id or {}
__lowerCAmelCase : Optional[Any] = path.split('::' )[-1]
try:
__lowerCAmelCase : List[Any] = string_to_dict(_SCREAMING_SNAKE_CASE , config.HUB_DATASETS_URL )['repo_id']
__lowerCAmelCase : Tuple = token_per_repo_id[repo_id]
except (ValueError, KeyError):
__lowerCAmelCase : Union[str, Any] = None
with xopen(_SCREAMING_SNAKE_CASE , 'rb' , use_auth_token=_SCREAMING_SNAKE_CASE ) as f:
__lowerCAmelCase , __lowerCAmelCase : str = sf.read(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase , __lowerCAmelCase : Optional[Any] = sf.read(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = array.T
if self.mono:
__lowerCAmelCase : List[Any] = librosa.to_mono(_SCREAMING_SNAKE_CASE )
if self.sampling_rate and self.sampling_rate != sampling_rate:
__lowerCAmelCase : Tuple = librosa.resample(_SCREAMING_SNAKE_CASE , orig_sr=_SCREAMING_SNAKE_CASE , target_sr=self.sampling_rate )
__lowerCAmelCase : List[Any] = self.sampling_rate
return {"path": path, "array": array, "sampling_rate": sampling_rate}
def __lowerCamelCase ( self ):
from .features import Value
if self.decode:
raise ValueError('Cannot flatten a decoded Audio feature.' )
return {
"bytes": Value('binary' ),
"path": Value('string' ),
}
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
if pa.types.is_string(storage.type ):
__lowerCAmelCase : List[Any] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
__lowerCAmelCase : Dict = pa.StructArray.from_arrays([bytes_array, storage] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_binary(storage.type ):
__lowerCAmelCase : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
__lowerCAmelCase : List[str] = pa.StructArray.from_arrays([storage, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
elif pa.types.is_struct(storage.type ) and storage.type.get_all_field_indices('array' ):
__lowerCAmelCase : Union[str, Any] = pa.array([Audio().encode_example(_SCREAMING_SNAKE_CASE ) if x is not None else None for x in storage.to_pylist()] )
elif pa.types.is_struct(storage.type ):
if storage.type.get_field_index('bytes' ) >= 0:
__lowerCAmelCase : Any = storage.field('bytes' )
else:
__lowerCAmelCase : Optional[Any] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.binary() )
if storage.type.get_field_index('path' ) >= 0:
__lowerCAmelCase : Union[str, Any] = storage.field('path' )
else:
__lowerCAmelCase : List[str] = pa.array([None] * len(_SCREAMING_SNAKE_CASE ) , type=pa.string() )
__lowerCAmelCase : str = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=storage.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE ):
@no_op_if_value_is_null
def path_to_bytes(_SCREAMING_SNAKE_CASE ):
with xopen(_SCREAMING_SNAKE_CASE , 'rb' ) as f:
__lowerCAmelCase : List[Any] = f.read()
return bytes_
__lowerCAmelCase : Dict = pa.array(
[
(path_to_bytes(x['path'] ) if x['bytes'] is None else x['bytes']) if x is not None else None
for x in storage.to_pylist()
] , type=pa.binary() , )
__lowerCAmelCase : List[Any] = pa.array(
[os.path.basename(_SCREAMING_SNAKE_CASE ) if path is not None else None for path in storage.field('path' ).to_pylist()] , type=pa.string() , )
__lowerCAmelCase : Dict = pa.StructArray.from_arrays([bytes_array, path_array] , ['bytes', 'path'] , mask=bytes_array.is_null() )
return array_cast(_SCREAMING_SNAKE_CASE , self.pa_type ) | 86 |
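The PCM branch of the encoder above rescales headerless 16-bit samples into [-1, 1] floats before re-encoding them as WAV; a standalone sketch of just that rescaling step (sample values are illustrative):

import numpy as np

def pcm16_to_float(raw: bytes) -> np.ndarray:
    # Raw PCM carries no header: reinterpret the bytes as int16 and
    # divide by the int16 maximum so samples land in [-1.0, 1.0].
    return np.frombuffer(raw, dtype=np.int16).astype(np.float32) / 32767

raw = np.array([0, 16384, -32767], dtype=np.int16).tobytes()
print(pcm16_to_float(raw))  # approximately [0.0, 0.5, -1.0]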
"""simple docstring"""
import numpy as np
def sigmoid(vector):
    """
    >>> sigmoid(np.array([-1.0, 1.0, 2.0]))
    array([0.26894142, 0.73105858, 0.88079708])
    """
    return 1 / (1 + np.exp(-vector))
def sigmoid_linear_unit(vector):
    """
    >>> sigmoid_linear_unit(np.array([-1.0, 1.0, 2.0]))
    array([-0.26894142,  0.73105858,  1.76159416])
    """
    return vector * sigmoid(vector)
if __name__ == "__main__":
import doctest
doctest.testmod() | 86 | 1 |
"""simple docstring"""
from itertools import zip_longest
import requests
from bs4 import BeautifulSoup
from pandas import DataFrame
def __lowerCAmelCase (_UpperCamelCase = "laptop" ):
__lowerCAmelCase : Any = F"https://www.amazon.in/laptop/s?k={product}"
__lowerCAmelCase : Dict = {
        'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36',
'Accept-Language': 'en-US, en;q=0.5',
}
__lowerCAmelCase : List[Any] = BeautifulSoup(requests.get(_UpperCamelCase , headers=_UpperCamelCase ).text )
# Initialize a Pandas dataframe with the column titles
__lowerCAmelCase : Union[str, Any] = DataFrame(
columns=[
'Product Title',
'Product Link',
'Current Price of the product',
'Product Rating',
'MRP of the product',
'Discount',
] )
# Loop through each entry and store them in the dataframe
for item, _ in zip_longest(
soup.find_all(
'div' , attrs={'class': 's-result-item', 'data-component-type': 's-search-result'} , ) , soup.find_all('div' , attrs={'class': 'a-row a-size-base a-color-base'} ) , ):
try:
            __lowerCAmelCase : str = item.h2.text
            __lowerCAmelCase : Tuple = 'https://www.amazon.in/' + item.h2.a['href']
__lowerCAmelCase : Optional[Any] = item.find('span' , attrs={'class': 'a-offscreen'} ).text
try:
__lowerCAmelCase : List[Any] = item.find('span' , attrs={'class': 'a-icon-alt'} ).text
except AttributeError:
__lowerCAmelCase : List[str] = 'Not available'
try:
__lowerCAmelCase : Any = (
'₹'
+ item.find(
'span' , attrs={'class': 'a-price a-text-price'} ).text.split('₹' )[1]
)
except AttributeError:
__lowerCAmelCase : Optional[int] = ''
try:
__lowerCAmelCase : List[Any] = float(
(
(
float(product_mrp.strip('₹' ).replace(',' , '' ) )
- float(product_price.strip('₹' ).replace(',' , '' ) )
)
/ float(product_mrp.strip('₹' ).replace(',' , '' ) )
)
* 100 )
except ValueError:
__lowerCAmelCase : str = float('nan' )
except AttributeError:
pass
__lowerCAmelCase : str = [
product_title,
product_link,
product_price,
product_rating,
product_mrp,
discount,
]
__lowerCAmelCase : List[Any] = ' '
__lowerCAmelCase : Tuple = ' '
data_frame.index += 1
return data_frame
if __name__ == "__main__":
lowerCamelCase__ = """headphones"""
get_amazon_product_data(product).to_csv(f'Amazon Product Data for {product}.csv') | 86 |
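A minimal sketch of the discount arithmetic used in the scraper above, with hypothetical price strings; the rupee-sign stripping and comma removal mirror the snippet:

def discount_percent(mrp: str, price: str) -> float:
    # Strip the currency symbol and thousands separators, then compute
    # the percentage saved relative to the MRP.
    mrp_value = float(mrp.strip("₹").replace(",", ""))
    price_value = float(price.strip("₹").replace(",", ""))
    return (mrp_value - price_value) / mrp_value * 100

print(discount_percent("₹1,000", "₹800"))  # 20.0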
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class A__ :
def __init__( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=13 , _SCREAMING_SNAKE_CASE=7 , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=99 , _SCREAMING_SNAKE_CASE=64 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=5 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=37 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=16 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=3 , _SCREAMING_SNAKE_CASE=4 , _SCREAMING_SNAKE_CASE=None , ):
__lowerCAmelCase : Optional[int] = parent
__lowerCAmelCase : Union[str, Any] = batch_size
__lowerCAmelCase : Dict = seq_length
__lowerCAmelCase : Dict = is_training
__lowerCAmelCase : List[str] = use_input_mask
__lowerCAmelCase : int = use_token_type_ids
__lowerCAmelCase : Optional[int] = use_labels
__lowerCAmelCase : List[Any] = vocab_size
__lowerCAmelCase : Dict = hidden_size
__lowerCAmelCase : Tuple = embedding_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = intermediate_size
__lowerCAmelCase : Optional[Any] = hidden_act
__lowerCAmelCase : Optional[int] = hidden_dropout_prob
__lowerCAmelCase : Dict = attention_probs_dropout_prob
__lowerCAmelCase : Any = max_position_embeddings
__lowerCAmelCase : Any = type_vocab_size
__lowerCAmelCase : Union[str, Any] = type_sequence_label_size
__lowerCAmelCase : List[str] = initializer_range
__lowerCAmelCase : str = num_labels
__lowerCAmelCase : int = num_choices
__lowerCAmelCase : Union[str, Any] = scope
def __lowerCamelCase ( self ):
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__lowerCAmelCase : Optional[int] = None
if self.use_input_mask:
__lowerCAmelCase : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
__lowerCAmelCase : str = None
if self.use_token_type_ids:
__lowerCAmelCase : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__lowerCAmelCase : Union[str, Any] = None
__lowerCAmelCase : Optional[int] = None
__lowerCAmelCase : Union[str, Any] = None
if self.use_labels:
__lowerCAmelCase : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__lowerCAmelCase : Dict = ids_tensor([self.batch_size] , self.num_choices )
__lowerCAmelCase : str = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCamelCase ( self ):
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_SCREAMING_SNAKE_CASE , initializer_range=self.initializer_range , )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : str = MobileBertModel(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = MobileBertForMaskedLM(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Any = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = MobileBertForNextSentencePrediction(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Dict = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : List[Any] = MobileBertForPreTraining(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[Any] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , next_sentence_label=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = MobileBertForQuestionAnswering(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , start_positions=_SCREAMING_SNAKE_CASE , end_positions=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_labels
__lowerCAmelCase : Tuple = MobileBertForSequenceClassification(_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[Any] = self.num_labels
__lowerCAmelCase : int = MobileBertForTokenClassification(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : Optional[Any] = model(_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Optional[int] = self.num_choices
__lowerCAmelCase : List[str] = MobileBertForMultipleChoice(config=_SCREAMING_SNAKE_CASE )
model.to(_SCREAMING_SNAKE_CASE )
model.eval()
__lowerCAmelCase : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__lowerCAmelCase : List[str] = model(
_SCREAMING_SNAKE_CASE , attention_mask=_SCREAMING_SNAKE_CASE , token_type_ids=_SCREAMING_SNAKE_CASE , labels=_SCREAMING_SNAKE_CASE , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCamelCase ( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class A__ ( _lowerCamelCase , _lowerCamelCase , unittest.TestCase):
A_ : str = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
A_ : List[str] = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Dict = True
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False ):
__lowerCAmelCase : List[str] = super()._prepare_for_class(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , return_labels=_SCREAMING_SNAKE_CASE )
if return_labels:
if model_class in get_values(_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_SCREAMING_SNAKE_CASE )
return inputs_dict
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = MobileBertModelTester(self )
__lowerCAmelCase : str = ConfigTester(self , config_class=_SCREAMING_SNAKE_CASE , hidden_size=37 )
def __lowerCamelCase ( self ):
self.config_tester.run_common_tests()
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*_SCREAMING_SNAKE_CASE )
def __lowerCAmelCase (_UpperCamelCase ):
return torch.tensor(
_UpperCamelCase , dtype=torch.long , device=_UpperCamelCase , )
lowerCamelCase__ = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase):
@slow
def __lowerCamelCase ( self ):
__lowerCAmelCase : Union[str, Any] = MobileBertModel.from_pretrained('google/mobilebert-uncased' ).to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
__lowerCAmelCase : int = model(_SCREAMING_SNAKE_CASE )[0]
__lowerCAmelCase : Dict = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = torch.tensor(
[
[
[-2.4_73_65_26E07, 8.2_69_16_56E04, 1.6_52_18_38E05],
[-5.7_54_17_04E-01, 3.9_05_60_22E00, 4.4_01_15_07E00],
[2.6_04_73_59E00, 1.5_67_76_52E00, -1.7_32_41_88E-01],
]
] , device=_SCREAMING_SNAKE_CASE , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
__lowerCAmelCase : Tuple = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
__lowerCAmelCase : Union[str, Any] = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 86 | 1 |
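The final assertion above compares tensors by ratio rather than by difference, as the in-code comment motivates for values spanning roughly 1e0 to 1e8; the same idea as a standalone helper (the sample tensors are illustrative):

import torch

def close_by_ratio(expected: torch.Tensor, actual: torch.Tensor, tol: float = 1e-3) -> bool:
    # Require expected / actual to fall inside [1 - tol, 1 + tol] element-wise;
    # unlike an absolute-difference check, this treats 1e0 and 1e8 entries alike.
    ratio = expected / actual
    return bool(torch.all(ratio >= 1 - tol) and torch.all(ratio <= 1 + tol))

print(close_by_ratio(torch.tensor([1e8, 2.0]), torch.tensor([1.0001e8, 2.0001])))  # True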
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( _lowerCamelCase , unittest.TestCase):
A_ : str = ShapEImgaImgPipeline
A_ : str = ['image']
A_ : int = ['image']
A_ : Tuple = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
A_ : Tuple = False
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return 32
@property
def __lowerCamelCase ( self ):
return self.time_input_dim * 4
@property
def __lowerCamelCase ( self ):
return 8
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Any = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
__lowerCAmelCase : Tuple = CLIPVisionModel(_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = CLIPImageProcessor(
crop_size=2_24 , do_center_crop=_SCREAMING_SNAKE_CASE , do_normalize=_SCREAMING_SNAKE_CASE , do_resize=_SCREAMING_SNAKE_CASE , image_mean=[0.4814_5466, 0.457_8275, 0.4082_1073] , image_std=[0.2686_2954, 0.2613_0258, 0.2757_7711] , resample=3 , size=2_24 , )
return image_processor
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Optional[Any] = {
'num_attention_heads': 2,
'attention_head_dim': 16,
'embedding_dim': self.time_input_dim,
'num_embeddings': 32,
'embedding_proj_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'num_layers': 1,
'clip_embed_dim': self.time_input_dim * 2,
'additional_embeddings': 0,
'time_embed_act_fn': 'gelu',
'norm_in_type': 'layer',
'embedding_proj_norm_type': 'layer',
'encoder_hid_proj_type': None,
'added_emb_type': None,
}
__lowerCAmelCase : List[Any] = PriorTransformer(**_SCREAMING_SNAKE_CASE )
return model
@property
def __lowerCamelCase ( self ):
torch.manual_seed(0 )
__lowerCAmelCase : Dict = {
'param_shapes': (
(self.renderer_dim, 93),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
(self.renderer_dim, 8),
),
'd_latent': self.time_input_dim,
'd_hidden': self.renderer_dim,
'n_output': 12,
'background': (
0.1,
0.1,
0.1,
),
}
__lowerCAmelCase : int = ShapERenderer(**_SCREAMING_SNAKE_CASE )
return model
def __lowerCamelCase ( self ):
__lowerCAmelCase : Any = self.dummy_prior
__lowerCAmelCase : List[Any] = self.dummy_image_encoder
__lowerCAmelCase : int = self.dummy_image_processor
__lowerCAmelCase : Any = self.dummy_renderer
__lowerCAmelCase : Any = HeunDiscreteScheduler(
beta_schedule='exp' , num_train_timesteps=10_24 , prediction_type='sample' , use_karras_sigmas=_SCREAMING_SNAKE_CASE , clip_sample=_SCREAMING_SNAKE_CASE , clip_sample_range=1.0 , )
__lowerCAmelCase : Tuple = {
'prior': prior,
'image_encoder': image_encoder,
'image_processor': image_processor,
'renderer': renderer,
'scheduler': scheduler,
}
return components
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=0 ):
__lowerCAmelCase : Optional[Any] = floats_tensor((1, 3, 64, 64) , rng=random.Random(_SCREAMING_SNAKE_CASE ) ).to(_SCREAMING_SNAKE_CASE )
if str(_SCREAMING_SNAKE_CASE ).startswith('mps' ):
__lowerCAmelCase : int = torch.manual_seed(_SCREAMING_SNAKE_CASE )
else:
__lowerCAmelCase : str = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = {
'image': input_image,
'generator': generator,
'num_inference_steps': 1,
'frame_size': 32,
'output_type': 'np',
}
return inputs
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = 'cpu'
__lowerCAmelCase : Dict = self.get_dummy_components()
__lowerCAmelCase : Optional[int] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = pipe(**self.get_dummy_inputs(_SCREAMING_SNAKE_CASE ) )
__lowerCAmelCase : Any = output.images[0]
__lowerCAmelCase : Optional[Any] = image[0, -3:, -3:, -1]
assert image.shape == (20, 32, 32, 3)
__lowerCAmelCase : List[Any] = np.array(
[
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
0.0003_9216,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def __lowerCamelCase ( self ):
# NOTE: Larger batch sizes cause this test to timeout, only test on smaller batches
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = torch_device == 'cpu'
__lowerCAmelCase : Optional[Any] = True
self._test_inference_batch_single_identical(
batch_size=2 , test_max_difference=_SCREAMING_SNAKE_CASE , relax_max_difference=_SCREAMING_SNAKE_CASE , )
def __lowerCamelCase ( self ):
__lowerCAmelCase : str = self.get_dummy_components()
__lowerCAmelCase : List[str] = self.pipeline_class(**_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[int] = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[str] = 1
__lowerCAmelCase : List[str] = 2
__lowerCAmelCase : Union[str, Any] = self.get_dummy_inputs(_SCREAMING_SNAKE_CASE )
for key in inputs.keys():
if key in self.batch_params:
__lowerCAmelCase : Optional[Any] = batch_size * [inputs[key]]
__lowerCAmelCase : List[str] = pipe(**_SCREAMING_SNAKE_CASE , num_images_per_prompt=_SCREAMING_SNAKE_CASE )[0]
assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase):
def __lowerCamelCase ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def __lowerCamelCase ( self ):
__lowerCAmelCase : int = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
__lowerCAmelCase : Any = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/shap_e/test_shap_e_img2img_out.npy' )
__lowerCAmelCase : Union[str, Any] = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
__lowerCAmelCase : Dict = pipe.to(_SCREAMING_SNAKE_CASE )
pipe.set_progress_bar_config(disable=_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = torch.Generator(device=_SCREAMING_SNAKE_CASE ).manual_seed(0 )
__lowerCAmelCase : int = pipe(
_SCREAMING_SNAKE_CASE , generator=_SCREAMING_SNAKE_CASE , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
assert images.shape == (20, 64, 64, 3)
assert_mean_pixel_difference(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) | 86 |
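get_dummy_inputs above seeds differently on mps because some PyTorch builds reject device generators there; the pattern in isolation, as a sketch assuming PyTorch:

import torch

def make_generator(device: str, seed: int) -> torch.Generator:
    # On "mps", fall back to seeding the global CPU generator, which
    # torch.manual_seed returns; elsewhere, seed a generator pinned to
    # the requested device.
    if device.startswith("mps"):
        return torch.manual_seed(seed)
    return torch.Generator(device=device).manual_seed(seed)

gen = make_generator("cpu", 0)
print(torch.rand(2, generator=gen))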
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class A__ ( _lowerCamelCase):
A_ : Any = ['image_processor', 'tokenizer']
A_ : Optional[Any] = 'AutoImageProcessor'
A_ : str = 'AutoTokenizer'
def __init__( self , _SCREAMING_SNAKE_CASE=None , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Union[str, Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , _SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Any = kwargs.pop('feature_extractor' )
__lowerCAmelCase : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = self.image_processor
__lowerCAmelCase : Tuple = False
def __call__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : List[Any] = kwargs.pop('images' , _SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Any = kwargs.pop('text' , _SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ) > 0:
__lowerCAmelCase : Dict = args[0]
__lowerCAmelCase : Union[str, Any] = args[1:]
if images is None and text is None:
raise ValueError('You need to specify either an `images` or `text` input to process.' )
if images is not None:
__lowerCAmelCase : Union[str, Any] = self.image_processor(_SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is not None:
__lowerCAmelCase : Dict = self.tokenizer(_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
if text is None:
return inputs
elif images is None:
return encodings
else:
__lowerCAmelCase : Union[str, Any] = encodings['input_ids']
return inputs
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.batch_decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return self.tokenizer.decode(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE )
@contextmanager
def __lowerCamelCase ( self ):
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your images inputs, or in a separate call.' )
__lowerCAmelCase : Any = True
__lowerCAmelCase : Dict = self.tokenizer
yield
__lowerCAmelCase : Optional[int] = self.image_processor
__lowerCAmelCase : Optional[Any] = False
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE=False , _SCREAMING_SNAKE_CASE=None ):
if added_vocab is None:
__lowerCAmelCase : str = self.tokenizer.get_added_vocab()
__lowerCAmelCase : List[Any] = {}
while tokens:
__lowerCAmelCase : int = re.search(R'<s_(.*?)>' , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if start_token is None:
break
__lowerCAmelCase : Union[str, Any] = start_token.group(1 )
__lowerCAmelCase : Tuple = re.search(Rf"</s_{key}>" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
__lowerCAmelCase : str = start_token.group()
if end_token is None:
__lowerCAmelCase : Optional[int] = tokens.replace(_SCREAMING_SNAKE_CASE , '' )
else:
__lowerCAmelCase : Optional[Any] = end_token.group()
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Tuple = re.escape(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Dict = re.search(f"{start_token_escaped}(.*?){end_token_escaped}" , _SCREAMING_SNAKE_CASE , re.IGNORECASE )
if content is not None:
__lowerCAmelCase : List[str] = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
__lowerCAmelCase : int = self.tokenajson(_SCREAMING_SNAKE_CASE , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if value:
if len(_SCREAMING_SNAKE_CASE ) == 1:
__lowerCAmelCase : Tuple = value[0]
__lowerCAmelCase : Tuple = value
else: # leaf nodes
__lowerCAmelCase : Any = []
for leaf in content.split(R'<sep/>' ):
__lowerCAmelCase : List[Any] = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
__lowerCAmelCase : Dict = leaf[1:-2] # for categorical special tokens
output[key].append(_SCREAMING_SNAKE_CASE )
if len(output[key] ) == 1:
__lowerCAmelCase : str = output[key][0]
__lowerCAmelCase : Dict = tokens[tokens.find(_SCREAMING_SNAKE_CASE ) + len(_SCREAMING_SNAKE_CASE ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=_SCREAMING_SNAKE_CASE , added_vocab=_SCREAMING_SNAKE_CASE )
if len(_SCREAMING_SNAKE_CASE ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor_class
@property
def __lowerCamelCase ( self ):
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , _SCREAMING_SNAKE_CASE , )
return self.image_processor | 86 | 1 |
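A toy, flat-only illustration of the <s_key>value</s_key> convention that the tokenajson method above parses; the real method additionally handles nested tags, <sep/> lists, and added-vocabulary leaf tokens (the tag string here is hypothetical):

import re

def parse_flat_tags(sequence: str) -> dict:
    # Match non-nested <s_key>...</s_key> pairs and collect them as a dict.
    return {
        match.group(1): match.group(2).strip()
        for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", sequence)
    }

print(parse_flat_tags("<s_menu>latte</s_menu><s_price>4.50</s_price>"))
# {'menu': 'latte', 'price': '4.50'}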
"""simple docstring"""
import enum
import shutil
import sys
lowerCamelCase__ , lowerCamelCase__ = shutil.get_terminal_size()
lowerCamelCase__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
class A__ ( enum.Enum):
A_ : Union[str, Any] = 0
A_ : List[Any] = 1
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase="" ):
sys.stdout.write(str(_UpperCamelCase ) + end )
sys.stdout.flush()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase="" ):
forceWrite(F"\u001b[{color}m{content}\u001b[0m" , _UpperCamelCase )
def __lowerCAmelCase ():
forceWrite('\r' )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
forceWrite(F"\033[{num_lines}{CURSOR_TO_CHAR[direction.upper()]}" )
def __lowerCAmelCase ():
forceWrite(' ' * TERMINAL_WIDTH )
reset_cursor()
def __lowerCAmelCase ():
reset_cursor()
forceWrite('-' * TERMINAL_WIDTH ) | 86 |
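A short usage sketch for the ANSI helpers above, assuming a terminal that honors ANSI escape sequences (32 is the standard green color code):

import sys

# Write "Done" in green, reset the attributes, then move the cursor up
# one line; these are the same escape forms the helpers above emit.
sys.stdout.write("\u001b[32mDone\u001b[0m\n")
sys.stdout.write("\033[1A")  # CSI n A: cursor up n lines
sys.stdout.flush()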
"""simple docstring"""
def sum_digits(num: int) -> int:
    digit_sum = 0
    while num > 0:
        digit_sum += num % 10
        num //= 10
    return digit_sum
def solution(max_n: int = 100) -> int:
    # Digit sum of the numerator of the max_n-th convergent of the
    # continued fraction of e.
    pre_numerator = 1
    cur_numerator = 2
    for i in range(2, max_n + 1):
        temp = pre_numerator
        e_cont = 2 * i // 3 if i % 3 == 0 else 1
        pre_numerator = cur_numerator
        cur_numerator = e_cont * pre_numerator + temp
    return sum_digits(cur_numerator)
if __name__ == "__main__":
print(f'{solution() = }') | 86 | 1 |
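A sanity check for the recurrence above: the continued fraction of e is [2; 1, 2, 1, 1, 4, ...], and Project Euler 65 states that the tenth convergent's numerator is 1457 with digit sum 17:

def convergent_numerator(n: int) -> int:
    # Same recurrence as solution(): a_1 = 2 and a_i = 2i/3 when i is a
    # multiple of 3, else 1; p_i = a_i * p_{i-1} + p_{i-2}.
    p_prev, p = 1, 2
    for i in range(2, n + 1):
        a = 2 * i // 3 if i % 3 == 0 else 1
        p_prev, p = p, a * p + p_prev
    return p

assert convergent_numerator(10) == 1457
assert sum(int(d) for d in str(convergent_numerator(10))) == 17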
"""simple docstring"""
import math
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import SchedulerMixin, SchedulerOutput
class A__ ( _lowerCamelCase , _lowerCamelCase):
A_ : List[Any] = 1
@register_to_config
def __init__( self , _SCREAMING_SNAKE_CASE = 10_00 , _SCREAMING_SNAKE_CASE = None ):
# set `betas`, `alphas`, `timesteps`
self.set_timesteps(_SCREAMING_SNAKE_CASE )
# standard deviation of the initial noise distribution
__lowerCAmelCase : Dict = 1.0
# For now we only support F-PNDM, i.e. the runge-kutta method
# For more information on the algorithm please take a look at the paper: https://arxiv.org/pdf/2202.09778.pdf
# mainly at formula (9), (12), (13) and the Algorithm 2.
__lowerCAmelCase : List[Any] = 4
# running values
__lowerCAmelCase : Optional[int] = []
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = None ):
__lowerCAmelCase : Tuple = num_inference_steps
__lowerCAmelCase : Optional[int] = torch.linspace(1 , 0 , num_inference_steps + 1 )[:-1]
__lowerCAmelCase : Dict = torch.cat([steps, torch.tensor([0.0] )] )
if self.config.trained_betas is not None:
__lowerCAmelCase : Any = torch.tensor(self.config.trained_betas , dtype=torch.floataa )
else:
__lowerCAmelCase : Union[str, Any] = torch.sin(steps * math.pi / 2 ) ** 2
__lowerCAmelCase : Any = (1.0 - self.betas**2) ** 0.5
__lowerCAmelCase : Tuple = (torch.atana(self.betas , self.alphas ) / math.pi * 2)[:-1]
__lowerCAmelCase : Optional[Any] = timesteps.to(_SCREAMING_SNAKE_CASE )
__lowerCAmelCase : Optional[Any] = []
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
__lowerCAmelCase : Tuple = (self.timesteps == timestep).nonzero().item()
__lowerCAmelCase : Union[str, Any] = timestep_index + 1
__lowerCAmelCase : Optional[int] = sample * self.betas[timestep_index] + model_output * self.alphas[timestep_index]
self.ets.append(_SCREAMING_SNAKE_CASE )
if len(self.ets ) == 1:
__lowerCAmelCase : str = self.ets[-1]
elif len(self.ets ) == 2:
__lowerCAmelCase : List[str] = (3 * self.ets[-1] - self.ets[-2]) / 2
elif len(self.ets ) == 3:
__lowerCAmelCase : List[Any] = (23 * self.ets[-1] - 16 * self.ets[-2] + 5 * self.ets[-3]) / 12
else:
__lowerCAmelCase : Any = (1 / 24) * (55 * self.ets[-1] - 59 * self.ets[-2] + 37 * self.ets[-3] - 9 * self.ets[-4])
__lowerCAmelCase : int = self._get_prev_sample(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
if not return_dict:
return (prev_sample,)
return SchedulerOutput(prev_sample=_SCREAMING_SNAKE_CASE )
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
return sample
def __lowerCamelCase ( self , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
__lowerCAmelCase : Dict = self.alphas[timestep_index]
__lowerCAmelCase : Tuple = self.betas[timestep_index]
__lowerCAmelCase : List[Any] = self.alphas[prev_timestep_index]
__lowerCAmelCase : Tuple = self.betas[prev_timestep_index]
__lowerCAmelCase : List[Any] = (sample - sigma * ets) / max(_SCREAMING_SNAKE_CASE , 1E-8 )
__lowerCAmelCase : int = next_alpha * pred + ets * next_sigma
return prev_sample
def __len__( self ):
return self.config.num_train_timesteps | 86 |
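The ets history above feeds classic Adams-Bashforth linear multistep coefficients of orders one through four; a standalone sketch of just the coefficient ladder, detached from the scheduler API:

def adams_bashforth_combine(ets):
    # Fall back to lower-order formulas until four past evaluations exist,
    # mirroring the branching in the step method above.
    if len(ets) == 1:
        return ets[-1]
    if len(ets) == 2:
        return (3 * ets[-1] - ets[-2]) / 2
    if len(ets) == 3:
        return (23 * ets[-1] - 16 * ets[-2] + 5 * ets[-3]) / 12
    return (55 * ets[-1] - 59 * ets[-2] + 37 * ets[-3] - 9 * ets[-4]) / 24

print(adams_bashforth_combine([1.0, 1.0, 1.0, 1.0]))  # 1.0: constant histories pass through unchanged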
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""microsoft/markuplm-base""": """https://huggingface.co/microsoft/markuplm-base/resolve/main/config.json""",
"""microsoft/markuplm-large""": """https://huggingface.co/microsoft/markuplm-large/resolve/main/config.json""",
}
class A__ ( _lowerCamelCase):
A_ : List[Any] = 'markuplm'
def __init__( self , _SCREAMING_SNAKE_CASE=3_05_22 , _SCREAMING_SNAKE_CASE=7_68 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=12 , _SCREAMING_SNAKE_CASE=30_72 , _SCREAMING_SNAKE_CASE="gelu" , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=0.1 , _SCREAMING_SNAKE_CASE=5_12 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=0.02 , _SCREAMING_SNAKE_CASE=1E-12 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=0 , _SCREAMING_SNAKE_CASE=2 , _SCREAMING_SNAKE_CASE=2_56 , _SCREAMING_SNAKE_CASE=10_24 , _SCREAMING_SNAKE_CASE=2_16 , _SCREAMING_SNAKE_CASE=10_01 , _SCREAMING_SNAKE_CASE=32 , _SCREAMING_SNAKE_CASE=50 , _SCREAMING_SNAKE_CASE="absolute" , _SCREAMING_SNAKE_CASE=True , _SCREAMING_SNAKE_CASE=None , **_SCREAMING_SNAKE_CASE , ):
super().__init__(
pad_token_id=_SCREAMING_SNAKE_CASE , bos_token_id=_SCREAMING_SNAKE_CASE , eos_token_id=_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE , )
__lowerCAmelCase : Union[str, Any] = vocab_size
__lowerCAmelCase : Any = hidden_size
__lowerCAmelCase : List[Any] = num_hidden_layers
__lowerCAmelCase : Tuple = num_attention_heads
__lowerCAmelCase : Union[str, Any] = hidden_act
__lowerCAmelCase : List[Any] = intermediate_size
__lowerCAmelCase : List[str] = hidden_dropout_prob
__lowerCAmelCase : List[str] = attention_probs_dropout_prob
__lowerCAmelCase : Optional[int] = max_position_embeddings
__lowerCAmelCase : int = type_vocab_size
__lowerCAmelCase : Tuple = initializer_range
__lowerCAmelCase : int = layer_norm_eps
__lowerCAmelCase : List[str] = position_embedding_type
__lowerCAmelCase : List[Any] = use_cache
__lowerCAmelCase : Optional[Any] = classifier_dropout
# additional properties
__lowerCAmelCase : Optional[int] = max_depth
__lowerCAmelCase : List[str] = max_xpath_tag_unit_embeddings
__lowerCAmelCase : Optional[Any] = max_xpath_subs_unit_embeddings
__lowerCAmelCase : Any = tag_pad_id
__lowerCAmelCase : Union[str, Any] = subs_pad_id
__lowerCAmelCase : int = xpath_unit_hidden_size | 86 | 1 |
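A hedged usage sketch for the configuration class above, assuming transformers is installed and exposes MarkupLMConfig; the overridden values are illustrative:

from transformers import MarkupLMConfig

# Defaults mirror the constructor above; any keyword can be overridden.
config = MarkupLMConfig(max_depth=50, xpath_unit_hidden_size=32)
print(config.model_type)  # "markuplm"
print(config.max_depth)   # 50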
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_chinese_clip import ChineseCLIPImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( _lowerCamelCase):
def __init__( self , *_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ):
warnings.warn(
'The class ChineseCLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use ChineseCLIPImageProcessor instead.' , _SCREAMING_SNAKE_CASE , )
super().__init__(*_SCREAMING_SNAKE_CASE , **_SCREAMING_SNAKE_CASE ) | 86 |
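The shim above follows a standard deprecation pattern: subclass the replacement and warn on construction. A generic, self-contained sketch with hypothetical class names:

import warnings

class NewProcessor:
    def __init__(self, size: int = 224) -> None:
        self.size = size

class OldProcessor(NewProcessor):
    # Keep the old import path working while steering callers to NewProcessor.
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "OldProcessor is deprecated and will be removed; use NewProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)

OldProcessor()  # emits a FutureWarning, then behaves exactly like NewProcessor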
"""simple docstring"""
import argparse
import torch
# Step 1. clone https://github.com/microsoft/unilm
# Step 2. git checkout to https://github.com/microsoft/unilm/commit/b94ec76c36f02fb2b0bf0dcb0b8554a2185173cd
# Step 3. cd unilm
# Step 4. ln -s $(realpath wavlm/modules.py) ./ # create simlink
# import classes
from unilm.wavlm.WavLM import WavLM as WavLMOrig
from unilm.wavlm.WavLM import WavLMConfig as WavLMConfigOrig
from transformers import WavLMConfig, WavLMModel, logging
logging.set_verbosity_info()
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn.grep_linear""": """encoder.layers.*.attention.gru_rel_pos_linear""",
"""self_attn.relative_attention_bias""": """encoder.layers.*.attention.rel_attn_embed""",
"""self_attn.grep_a""": """encoder.layers.*.attention.gru_rel_pos_const""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """ctc_proj""",
"""mask_emb""": """masked_spec_embed""",
}
lowerCamelCase__ = [
"""ctc_proj""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
]
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
for attribute in key.split('.' ):
__lowerCAmelCase : str = getattr(_UpperCamelCase , _UpperCamelCase )
if weight_type is not None:
__lowerCAmelCase : Tuple = getattr(_UpperCamelCase , _UpperCamelCase ).shape
else:
__lowerCAmelCase : Dict = hf_pointer.shape
assert hf_shape == value.shape, (
F"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
F" {value.shape} for {full_name}"
)
if weight_type == "weight":
__lowerCAmelCase : Union[str, Any] = value
elif weight_type == "weight_g":
__lowerCAmelCase : List[Any] = value
elif weight_type == "weight_v":
__lowerCAmelCase : Any = value
elif weight_type == "bias":
__lowerCAmelCase : List[str] = value
else:
__lowerCAmelCase : List[Any] = value
logger.info(F"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}." )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : Any = []
__lowerCAmelCase : Optional[int] = fairseq_model.state_dict()
__lowerCAmelCase : Union[str, Any] = hf_model.feature_extractor
for name, value in fairseq_dict.items():
__lowerCAmelCase : Union[str, Any] = False
if "conv_layers" in name:
load_conv_layer(
_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , hf_model.config.feat_extract_norm == 'group' , )
__lowerCAmelCase : str = True
else:
for key, mapped_key in MAPPING.items():
if key in name or key.split('w2v_model.' )[-1] == name.split('.' )[0]:
__lowerCAmelCase : int = True
if "*" in mapped_key:
__lowerCAmelCase : List[str] = name.split(_UpperCamelCase )[0].split('.' )[-2]
__lowerCAmelCase : Optional[Any] = mapped_key.replace('*' , _UpperCamelCase )
if "weight_g" in name:
__lowerCAmelCase : Union[str, Any] = 'weight_g'
elif "weight_v" in name:
__lowerCAmelCase : int = 'weight_v'
elif "bias" in name and "relative_attention_bias" not in name:
__lowerCAmelCase : Optional[Any] = 'bias'
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__lowerCAmelCase : List[str] = 'weight'
else:
__lowerCAmelCase : Optional[Any] = None
set_recursively(_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase )
continue
if not is_used:
unused_weights.append(_UpperCamelCase )
logger.warning(F"Unused weights: {unused_weights}" )
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase , _UpperCamelCase ):
__lowerCAmelCase : List[Any] = full_name.split('conv_layers.' )[-1]
__lowerCAmelCase : Any = name.split('.' )
__lowerCAmelCase : List[Any] = int(items[0] )
__lowerCAmelCase : Tuple = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
)
__lowerCAmelCase : Tuple = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
)
__lowerCAmelCase : int = value
logger.info(F"Feat extract conv layer {layer_id} was initialized from {full_name}." )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
" found."
)
__lowerCAmelCase : Optional[Any] = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F"{full_name} has size {value.shape}, but"
F" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
)
__lowerCAmelCase : Any = value
logger.info(F"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}." )
else:
unused_weights.append(_UpperCamelCase )
@torch.no_grad()
def __lowerCAmelCase (_UpperCamelCase , _UpperCamelCase , _UpperCamelCase=None ):
# load the pre-trained checkpoints
__lowerCAmelCase : Any = torch.load(_UpperCamelCase )
__lowerCAmelCase : List[str] = WavLMConfigOrig(checkpoint['cfg'] )
__lowerCAmelCase : Optional[Any] = WavLMOrig(_UpperCamelCase )
model.load_state_dict(checkpoint['model'] )
model.eval()
if config_path is not None:
__lowerCAmelCase : Dict = WavLMConfig.from_pretrained(_UpperCamelCase )
else:
__lowerCAmelCase : List[str] = WavLMConfig()
__lowerCAmelCase : List[str] = WavLMModel(_UpperCamelCase )
recursively_load_weights(_UpperCamelCase , _UpperCamelCase )
hf_wavlm.save_pretrained(_UpperCamelCase )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
lowerCamelCase__ = parser.parse_args()
convert_wavlm_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 86 | 1 |
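A compact sketch of the wildcard key mapping that the conversion above relies on: the '*' in a mapped key stands for the layer index recovered from the fairseq parameter name (the sample name and mapping are illustrative):

def map_key(name: str, mapping: dict) -> str:
    # Mirror of the loop above: substitute the layer index for "*"
    # by splitting the original parameter name around the matched key.
    for key, mapped in mapping.items():
        if key in name:
            if "*" in mapped:
                layer_index = name.split(key)[0].split(".")[-2]
                mapped = mapped.replace("*", layer_index)
            return mapped
    return ""

print(map_key(
    "encoder.layers.3.fc1.weight",
    {"fc1": "encoder.layers.*.feed_forward.intermediate_dense"},
))  # encoder.layers.3.feed_forward.intermediate_dense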