code stringlengths 81 54k | code_codestyle int64 0 721 | style_context stringlengths 91 41.9k | style_context_codestyle int64 0 699 | label int64 0 1 |
|---|---|---|---|---|
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
assert isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ), F'''The input value of [n={number}] is not an integer'''
if number == 1:
return 2
elif number < 1:
lowerCAmelCase__ : Dict = F'''The input value of [n={number}] has to be > 0'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : str = sylvester(number - 1 )
lowerCAmelCase__ : Dict = num - 1
lowerCAmelCase__ : Dict = num
return lower * upper + 1
if __name__ == "__main__":
print(F"""The 8th number in Sylvester's sequence: {sylvester(8)}""") | 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 | 1 |
import argparse
import os
import re
import numpy as np
import PIL
import torch
from timm import create_model
from torch.optim.lr_scheduler import OneCycleLR
from torch.utils.data import DataLoader, Dataset
from torchvision.transforms import Compose, RandomResizedCrop, Resize, ToTensor
from accelerate import Accelerator
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Optional[int] = fname.split(os.path.sep )[-1]
return re.search(r'^(.*)_\d+\.jpg$' , SCREAMING_SNAKE_CASE_ ).groups()[0]
class A__ ( __magic_name__ ):
def __init__( self : Optional[int] , a : Dict , a : Tuple=None , a : Tuple=None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = file_names
lowerCAmelCase__ : str = image_transform
lowerCAmelCase__ : Tuple = label_to_id
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.file_names )
def __getitem__( self : Optional[int] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.file_names[idx]
lowerCAmelCase__ : int = PIL.Image.open(a )
lowerCAmelCase__ : Optional[Any] = raw_image.convert('RGB' )
if self.image_transform is not None:
lowerCAmelCase__ : Any = self.image_transform(a )
lowerCAmelCase__ : Dict = extract_label(a )
if self.label_to_id is not None:
lowerCAmelCase__ : Optional[Any] = self.label_to_id[label]
return {"image": image, "label": label}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# Initialize accelerator
if args.with_tracking:
lowerCAmelCase__ : List[str] = Accelerator(
cpu=args.cpu , mixed_precision=args.mixed_precision , log_with='all' , project_dir=args.project_dir )
else:
lowerCAmelCase__ : List[str] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : Union[str, Any] = config['lr']
lowerCAmelCase__ : str = int(config['num_epochs'] )
lowerCAmelCase__ : List[Any] = int(config['seed'] )
lowerCAmelCase__ : Optional[int] = int(config['batch_size'] )
lowerCAmelCase__ : List[str] = config['image_size']
if not isinstance(SCREAMING_SNAKE_CASE_ , (list, tuple) ):
lowerCAmelCase__ : str = (image_size, image_size)
# Parse out whether we are saving every epoch or after a certain number of batches
if hasattr(args.checkpointing_steps , 'isdigit' ):
if args.checkpointing_steps == "epoch":
lowerCAmelCase__ : List[Any] = args.checkpointing_steps
elif args.checkpointing_steps.isdigit():
lowerCAmelCase__ : List[Any] = int(args.checkpointing_steps )
else:
raise ValueError(
F'''Argument `checkpointing_steps` must be either a number or `epoch`. `{args.checkpointing_steps}` passed.''' )
else:
lowerCAmelCase__ : Union[str, Any] = None
# We need to initialize the trackers we use, and also store our configuration
if args.with_tracking:
lowerCAmelCase__ : Optional[Any] = os.path.split(SCREAMING_SNAKE_CASE_ )[-1].split('.' )[0]
accelerator.init_trackers(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Grab all the image filenames
lowerCAmelCase__ : List[Any] = [os.path.join(args.data_dir , SCREAMING_SNAKE_CASE_ ) for fname in os.listdir(args.data_dir ) if fname.endswith('.jpg' )]
# Build the label correspondences
lowerCAmelCase__ : Optional[int] = [extract_label(SCREAMING_SNAKE_CASE_ ) for fname in file_names]
lowerCAmelCase__ : Tuple = list(set(SCREAMING_SNAKE_CASE_ ) )
id_to_label.sort()
lowerCAmelCase__ : int = {lbl: i for i, lbl in enumerate(SCREAMING_SNAKE_CASE_ )}
# Set the seed before splitting the data.
np.random.seed(SCREAMING_SNAKE_CASE_ )
torch.manual_seed(SCREAMING_SNAKE_CASE_ )
torch.cuda.manual_seed_all(SCREAMING_SNAKE_CASE_ )
# Split our filenames between train and validation
lowerCAmelCase__ : Any = np.random.permutation(len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : List[str] = int(0.8 * len(SCREAMING_SNAKE_CASE_ ) )
lowerCAmelCase__ : str = random_perm[:cut]
lowerCAmelCase__ : Tuple = random_perm[cut:]
# For training we use a simple RandomResizedCrop
lowerCAmelCase__ : int = Compose([RandomResizedCrop(SCREAMING_SNAKE_CASE_ , scale=(0.5, 1.0) ), ToTensor()] )
lowerCAmelCase__ : Optional[Any] = PetsDataset(
[file_names[i] for i in train_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ )
# For evaluation, we use a deterministic Resize
lowerCAmelCase__ : List[str] = Compose([Resize(SCREAMING_SNAKE_CASE_ ), ToTensor()] )
lowerCAmelCase__ : str = PetsDataset([file_names[i] for i in eval_split] , image_transform=SCREAMING_SNAKE_CASE_ , label_to_id=SCREAMING_SNAKE_CASE_ )
# Instantiate dataloaders.
lowerCAmelCase__ : Dict = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
lowerCAmelCase__ : Union[str, Any] = DataLoader(SCREAMING_SNAKE_CASE_ , shuffle=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ , num_workers=4 )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Tuple = create_model('resnet50d' , pretrained=SCREAMING_SNAKE_CASE_ , num_classes=len(SCREAMING_SNAKE_CASE_ ) )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
lowerCAmelCase__ : List[Any] = model.to(accelerator.device )
# Freezing the base model
for param in model.parameters():
lowerCAmelCase__ : Any = False
for param in model.get_classifier().parameters():
lowerCAmelCase__ : List[Any] = True
# We normalize the batches of images to be a bit faster.
lowerCAmelCase__ : Optional[int] = torch.tensor(model.default_cfg['mean'] )[None, :, None, None].to(accelerator.device )
lowerCAmelCase__ : int = torch.tensor(model.default_cfg['std'] )[None, :, None, None].to(accelerator.device )
# Instantiate optimizer
lowerCAmelCase__ : Optional[int] = torch.optim.Adam(params=model.parameters() , lr=lr / 25 )
# Instantiate learning rate scheduler
lowerCAmelCase__ : str = OneCycleLR(optimizer=SCREAMING_SNAKE_CASE_ , max_lr=SCREAMING_SNAKE_CASE_ , epochs=SCREAMING_SNAKE_CASE_ , steps_per_epoch=len(SCREAMING_SNAKE_CASE_ ) )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[str] = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ : str = 0
# We also need to keep track of the starting epoch so files are named properly
lowerCAmelCase__ : List[str] = 0
# Potentially load in the weights and states from a previous save
if args.resume_from_checkpoint:
if args.resume_from_checkpoint is not None or args.resume_from_checkpoint != "":
accelerator.print(F'''Resumed from checkpoint: {args.resume_from_checkpoint}''' )
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase__ : List[Any] = os.path.basename(args.resume_from_checkpoint )
else:
# Get the most recent checkpoint
lowerCAmelCase__ : str = [f.name for f in os.scandir(os.getcwd() ) if f.is_dir()]
dirs.sort(key=os.path.getctime )
lowerCAmelCase__ : str = dirs[-1] # Sorts folders by date modified, most recent checkpoint is the last
# Extract `epoch_{i}` or `step_{i}`
lowerCAmelCase__ : Optional[int] = os.path.splitext(SCREAMING_SNAKE_CASE_ )[0]
if "epoch" in training_difference:
lowerCAmelCase__ : Union[str, Any] = int(training_difference.replace('epoch_' , '' ) ) + 1
lowerCAmelCase__ : Tuple = None
else:
lowerCAmelCase__ : Any = int(training_difference.replace('step_' , '' ) )
lowerCAmelCase__ : int = resume_step // len(SCREAMING_SNAKE_CASE_ )
resume_step -= starting_epoch * len(SCREAMING_SNAKE_CASE_ )
# Now we train the model
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
if args.with_tracking:
lowerCAmelCase__ : Tuple = 0
if args.resume_from_checkpoint and epoch == starting_epoch and resume_step is not None:
# We need to skip steps until we reach the resumed step
lowerCAmelCase__ : str = accelerator.skip_first_batches(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
overall_step += resume_step
else:
# After the first iteration though, we need to go back to the original dataloader
lowerCAmelCase__ : List[Any] = train_dataloader
for batch in active_dataloader:
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ : Optional[int] = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ : Dict = (batch['image'] - mean) / std
lowerCAmelCase__ : Any = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = torch.nn.functional.cross_entropy(SCREAMING_SNAKE_CASE_ , batch['label'] )
# We keep track of the loss at each epoch
if args.with_tracking:
total_loss += loss.detach().float()
accelerator.backward(SCREAMING_SNAKE_CASE_ )
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Optional[Any] = F'''step_{overall_step}'''
if overall_step % checkpointing_steps == 0:
if args.output_dir is not None:
lowerCAmelCase__ : Union[str, Any] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
model.eval()
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : Optional[int] = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
lowerCAmelCase__ : Tuple = {k: v.to(accelerator.device ) for k, v in batch.items()}
lowerCAmelCase__ : str = (batch['image'] - mean) / std
with torch.no_grad():
lowerCAmelCase__ : List[str] = model(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = outputs.argmax(dim=-1 )
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = accelerator.gather_for_metrics((predictions, batch['label']) )
lowerCAmelCase__ : Optional[Any] = predictions == references
num_elems += accurate_preds.shape[0]
accurate += accurate_preds.long().sum()
lowerCAmelCase__ : List[Any] = accurate.item() / num_elems
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}: {100 * eval_metric:.2f}''' )
if args.with_tracking:
accelerator.log(
{
'accuracy': 100 * eval_metric,
'train_loss': total_loss.item() / len(SCREAMING_SNAKE_CASE_ ),
'epoch': epoch,
} , step=SCREAMING_SNAKE_CASE_ , )
if checkpointing_steps == "epoch":
lowerCAmelCase__ : List[str] = F'''epoch_{epoch}'''
if args.output_dir is not None:
lowerCAmelCase__ : int = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
if args.with_tracking:
accelerator.end_training()
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser(description='Simple example of training script.' )
parser.add_argument('--data_dir' , required=SCREAMING_SNAKE_CASE_ , help='The data folder on disk.' )
parser.add_argument('--fp16' , action='store_true' , help='If passed, will use FP16 training.' )
parser.add_argument(
'--mixed_precision' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , choices=['no', 'fp16', 'bf16', 'fp8'] , help='Whether to use mixed precision. Choose'
'between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10.'
'and an Nvidia Ampere GPU.' , )
parser.add_argument('--cpu' , action='store_true' , help='If passed, will train on the CPU.' )
parser.add_argument(
'--checkpointing_steps' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='Whether the various states should be saved at the end of every n steps, or \'epoch\' for each epoch.' , )
parser.add_argument(
'--output_dir' , type=SCREAMING_SNAKE_CASE_ , default='.' , help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' , )
parser.add_argument(
'--resume_from_checkpoint' , type=SCREAMING_SNAKE_CASE_ , default=SCREAMING_SNAKE_CASE_ , help='If the training should continue from a checkpoint folder.' , )
parser.add_argument(
'--with_tracking' , action='store_true' , help='Whether to load in all available experiment trackers from the environment and use them for logging.' , )
parser.add_argument(
'--project_dir' , type=SCREAMING_SNAKE_CASE_ , default='logs' , help='Location on where to store experiment tracking logs` and relevent project information' , )
lowerCAmelCase__ : Optional[int] = parser.parse_args()
lowerCAmelCase__ : List[Any] = {'lr': 3e-2, 'num_epochs': 3, 'seed': 42, 'batch_size': 64, 'image_size': 224}
training_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 69 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE_ ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE_ )
return {"title": titles, "text": texts}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ : List[str] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )['input_ids']
lowerCAmelCase__ : Tuple = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ : str = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ : Optional[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ : List[Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ : List[Any] = dataset.map(
partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) , batched=SCREAMING_SNAKE_CASE_ , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE_ , )
# And finally save your dataset
lowerCAmelCase__ : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE_ )
# And save the index
lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A__ :
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
lowercase = field(
default=__magic_name__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class A__ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 69 | 1 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
lowercase = ['audio_values', 'audio_mask']
def __init__( self : Dict , a : Dict=2_048 , a : Optional[Any]=1 , a : List[Any]=[16, 16] , a : Dict=128 , a : List[str]=44_100 , a : Union[str, Any]=86 , a : Optional[Any]=2_048 , a : List[Any]=0.0 , **a : Tuple , ):
'''simple docstring'''
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , **a , )
lowerCAmelCase__ : Optional[Any] = spectrogram_length
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Tuple = patch_size
lowerCAmelCase__ : Optional[int] = feature_size // self.patch_size[1]
lowerCAmelCase__ : Union[str, Any] = n_fft
lowerCAmelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase__ : int = sampling_rate
lowerCAmelCase__ : Union[str, Any] = padding_value
lowerCAmelCase__ : Dict = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T
def _lowerCamelCase ( self : Optional[int] , a : np.array ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = spectrogram(
a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
lowerCAmelCase__ : Any = log_spec[:, :-1]
lowerCAmelCase__ : Dict = log_spec - 2_0.0
lowerCAmelCase__ : Tuple = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : str , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : int , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase__ : Dict = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase__ : List[Any] = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowerCAmelCase__ : Optional[Any] = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase__ : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
lowerCAmelCase__ : Optional[Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase__ : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase__ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase__ : List[Any] = np.array(a ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase__ : int = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase__ : Dict = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase__ : Optional[Any] = padded_audio_features * self.padding_value
for i in range(len(a ) ):
lowerCAmelCase__ : Tuple = audio_features[i]
lowerCAmelCase__ : List[str] = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase__ : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowerCAmelCase__ : Any = {'audio_values': padded_audio_features}
lowerCAmelCase__ : Any = BatchFeature(data=a , tensor_type=a )
return encoded_inputs | 69 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : str , **a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter
lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1
lowerCAmelCase__ : Tuple = samplea.shape[0]
lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a )
lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCAmelCase__ : str = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Dict = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Any = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : Optional[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : List[str] = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
    """Same full-loop test as above, but with `prediction_type='v_prediction'`."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config(prediction_type='v_prediction' )
    scheduler = scheduler_class(**scheduler_config)
    num_trained_timesteps = len(scheduler)
    model = self.dummy_model()
    sample = self.dummy_sample_deter
    generator = torch.manual_seed(0)
    for t in reversed(range(num_trained_timesteps)):
        # 1. predict noise residual
        residual = model(sample, t)
        # 2. predict previous mean of sample x_t-1
        pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
        sample = pred_prev_sample
    result_sum = torch.sum(torch.abs(sample))
    result_mean = torch.mean(torch.abs(sample))
    assert abs(result_sum.item() - 202.0296) < 1e-2
    assert abs(result_mean.item() - 0.2631) < 1e-3
def _lowerCamelCase ( self : Dict ):
    """`previous_timestep` must return the next entry of a custom timestep
    schedule, and -1 for the last entry."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    scheduler.set_timesteps(timesteps=timesteps)
    scheduler_timesteps = scheduler.timesteps
    for i, timestep in enumerate(scheduler_timesteps):
        if i == len(scheduler_timesteps) - 1:
            expected_prev_t = -1
        else:
            expected_prev_t = scheduler_timesteps[i + 1]
        prev_t = scheduler.previous_timestep(timestep)
        prev_t = prev_t.item()
        self.assertEqual(prev_t, expected_prev_t)
def _lowerCamelCase ( self : Union[str, Any] ):
    """`set_timesteps` must reject custom timesteps that are not strictly descending."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 51, 0]  # 50 -> 51 breaks the descending order
    with self.assertRaises(ValueError , msg='`custom_timesteps` must be in descending order.' ):
        scheduler.set_timesteps(timesteps=timesteps)
def _lowerCamelCase ( self : List[Any] ):
    """Passing both `num_inference_steps` and custom `timesteps` must raise."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [100, 87, 50, 1, 0]
    num_inference_steps = len(timesteps)
    with self.assertRaises(ValueError , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
        scheduler.set_timesteps(num_inference_steps=num_inference_steps , timesteps=timesteps )
def _lowerCamelCase ( self : List[str] ):
    """Custom timesteps starting at `num_train_timesteps` are out of range and must raise."""
    scheduler_class = self.scheduler_classes[0]
    scheduler_config = self.get_scheduler_config()
    scheduler = scheduler_class(**scheduler_config)
    timesteps = [scheduler.config.num_train_timesteps]
    with self.assertRaises(
        ValueError , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
        scheduler.set_timesteps(timesteps=timesteps )
import importlib
import shutil
import threading
import warnings
from typing import List
import fsspec
import fsspec.asyn
from . import compression
from .hffilesystem import HfFileSystem
# True when the optional `s3fs` dependency is installed; the name below is what
# the guard on the next line reads (the original assignment target was mangled).
_has_safs = importlib.util.find_spec("""s3fs""") is not None
if _has_safs:
    # NOTE(review): module/class names look mangled (`safilesystem`/`SaFileSystem`,
    # `Bza`/`Lza` below) -- verify against the actual sibling modules.
    from .safilesystem import SaFileSystem  # noqa: F401

# All bundled compression filesystems; the registration loop below reads this name.
COMPRESSION_FILESYSTEMS = [
    compression.BzaFileSystem,
    compression.GzipFileSystem,
    compression.LzaFileSystem,
    compression.XzFileSystem,
    compression.ZstdFileSystem,
]
# Register custom filesystems
for fs_class in COMPRESSION_FILESYSTEMS + [HfFileSystem]:
    if fs_class.protocol in fsspec.registry and fsspec.registry[fs_class.protocol] is not fs_class:
        warnings.warn(F"""A filesystem protocol was already set for {fs_class.protocol} and will be overwritten.""")
    fsspec.register_implementation(fs_class.protocol, fs_class, clobber=True)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Strip a protocol prefix (e.g. "s3://") from a dataset path, if present.

    Returns the path unchanged when it has no "://" separator.
    """
    # Bind the (legacy-named) positional parameter to the name the logic uses;
    # the original body read `dataset_path` without ever defining it.
    dataset_path = SCREAMING_SNAKE_CASE_
    if "://" in dataset_path:
        dataset_path = dataset_path.split('://' )[1]
    return dataset_path
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Return True when `fs` is a non-local fsspec filesystem instance.

    `None` and filesystems whose protocol is "file" are considered local.
    """
    # The original body read `fs` without defining it; bind the parameter.
    fs = SCREAMING_SNAKE_CASE_
    return fs is not None and fs.protocol != "file"
def lowerCAmelCase__ ( fs , src , dst ):
    """Move `src` to `dst` on filesystem `fs`.

    Local filesystems use `shutil.move` (fsspec's LocalFileSystem.mv does
    copy + rm, so a plain move is more efficient); remote filesystems use
    `fs.mv`. The original signature declared three identical parameter names
    (a SyntaxError); they are restored from the body's usage.
    """
    # Inlined locality check (the module's `is_remote_filesystem` helper is
    # referenced here in the original but is not defined under that name).
    is_local = fs is None or fs.protocol == "file"
    if is_local:
        # LocalFileSystem.mv does copy + rm, it is more efficient to simply move a local directory
        shutil.move(fs._strip_protocol(src ) , fs._strip_protocol(dst ) )
    else:
        fs.mv(src , dst , recursive=True )
def lowerCAmelCase__ ( ) -> None:
    """Best-effort reset of fsspec's async machinery (e.g. after a fork)."""
    if hasattr(fsspec.asyn , 'reset_lock' ):
        # for future fsspec>2022.05.0
        fsspec.asyn.reset_lock()
    else:
        # Older fsspec: clear the cached IO thread/event loop and recreate the
        # module lock. NOTE(review): the mangled source dropped the assignment
        # targets on these three lines; reconstructed -- verify against the
        # fsspec.asyn internals in use.
        fsspec.asyn.iothread[0] = None
        fsspec.asyn.loop[0] = None
        fsspec.asyn.lock = threading.Lock()
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    """Processor combining a LayoutLMv3 image processor and a LayoutLMv3 tokenizer.

    The image processor handles resizing/normalization (and optionally OCR);
    the tokenizer turns words + boxes into model inputs. The base class was
    undefined (`__magic_name__`) in the mangled source -- `ProcessorMixin` is
    the class imported above and matched by the hook attributes below.
    """

    # ProcessorMixin hooks: managed attributes and the classes used to build them.
    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__( self , image_processor=None , tokenizer=None , **kwargs ):
        """Build the processor; accepts the deprecated `feature_extractor` kwarg."""
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.' , FutureWarning , )
            feature_extractor = kwargs.pop('feature_extractor' )

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.' )
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.' )

        super().__init__(image_processor , tokenizer )

    def __call__( self , images , text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , boxes: Union[List[List[int]], List[List[List[int]]]] = None , word_labels: Optional[Union[List[int], List[List[int]]]] = None , add_special_tokens: bool = True , padding: Union[bool, str, PaddingStrategy] = False , truncation: Union[bool, str, TruncationStrategy] = None , max_length: Optional[int] = None , stride: int = 0 , pad_to_multiple_of: Optional[int] = None , return_token_type_ids: Optional[bool] = None , return_attention_mask: Optional[bool] = None , return_overflowing_tokens: bool = False , return_special_tokens_mask: bool = False , return_offsets_mapping: bool = False , return_length: bool = False , verbose: bool = True , return_tensors: Optional[Union[str, TensorType]] = None , **kwargs , ):
        """Run the image processor, then the tokenizer, and merge the outputs.

        When the image processor performs OCR, the recognized words/boxes are
        fed to the tokenizer; otherwise the caller must supply them.
        """
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )

        # first, apply the image processor
        features = self.image_processor(images=images , return_tensors=return_tensors )

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text , str ):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=word_labels , add_special_tokens=add_special_tokens , padding=padding , truncation=truncation , max_length=max_length , stride=stride , pad_to_multiple_of=pad_to_multiple_of , return_token_type_ids=return_token_type_ids , return_attention_mask=return_attention_mask , return_overflowing_tokens=return_overflowing_tokens , return_special_tokens_mask=return_special_tokens_mask , return_offsets_mapping=return_offsets_mapping , return_length=return_length , verbose=verbose , return_tensors=return_tensors , **kwargs , )

        # add pixel values
        images = features.pop('pixel_values' )
        if return_overflowing_tokens is True:
            # Repeat images so every overflowed chunk keeps its source image.
            images = self.get_overflowing_images(images , encoded_inputs['overflow_to_sample_mapping'] )
        encoded_inputs['pixel_values'] = images

        return encoded_inputs

    def get_overflowing_images( self , images , overflow_to_sample_mapping ):
        """Map each overflowed encoding back to the image it came from."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx] )

        if len(images_with_overflow ) != len(overflow_to_sample_mapping ):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(images_with_overflow )} and {len(overflow_to_sample_mapping )}''' )

        return images_with_overflow

    def batch_decode( self , *args , **kwargs ):
        """Forward to the tokenizer's `batch_decode`."""
        return self.tokenizer.batch_decode(*args , **kwargs )

    def decode( self , *args , **kwargs ):
        """Forward to the tokenizer's `decode`."""
        return self.tokenizer.decode(*args , **kwargs )

    @property
    def model_input_names( self ):
        """Names of the tensors this processor produces."""
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class( self ):
        """Deprecated alias for `image_processor_class`."""
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , FutureWarning , )
        return self.image_processor_class

    @property
    def feature_extractor( self ):
        """Deprecated alias for `image_processor`."""
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , FutureWarning , )
        return self.image_processor
import json
import os
import unittest
from transformers.models.blenderbot_small.tokenization_blenderbot_small import (
VOCAB_FILES_NAMES,
BlenderbotSmallTokenizer,
)
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tests for `BlenderbotSmallTokenizer` (the undefined `__magic_name__`
    base is restored to the `TokenizerTesterMixin` imported above)."""

    # TokenizerTesterMixin hooks.
    tokenizer_class = BlenderbotSmallTokenizer
    test_rust_tokenizer = False

    def setUp( self ):
        """Write a tiny vocab/merges pair into the mixin's temp directory."""
        super().setUp()
        vocab = ['__start__', 'adapt', 'act', 'ap@@', 'te', '__end__', '__unk__']
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['#version: 0.2', 'a p', 't e</w>', 'ap t</w>', 'a d', 'ad apt</w>', 'a c', 'ac t</w>', '']
        self.special_tokens_map = {'unk_token': '__unk__', 'bos_token': '__start__', 'eos_token': '__end__'}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' , encoding='utf-8' ) as fp:
            fp.write(json.dumps(vocab_tokens ) + '\n' )
        with open(self.merges_file , 'w' , encoding='utf-8' ) as fp:
            fp.write('\n'.join(merges ) )

    def get_tokenizer( self , **kwargs ):
        """Build a tokenizer from the fixture files with the special-token map applied."""
        kwargs.update(self.special_tokens_map )
        return BlenderbotSmallTokenizer.from_pretrained(self.tmpdirname , **kwargs )

    def get_input_output_texts( self , tokenizer ):
        """Round-trip text pair used by the mixin's generic tests."""
        input_text = 'adapt act apte'
        output_text = 'adapt act apte'
        return input_text, output_text

    def test_full_blenderbot_small_tokenizer( self ):
        """BPE tokenization and token-to-id conversion on the fixture vocab."""
        tokenizer = BlenderbotSmallTokenizer(self.vocab_file , self.merges_file , **self.special_tokens_map )
        text = 'adapt act apte'
        bpe_tokens = ['adapt', 'act', 'ap@@', 'te']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = [tokenizer.bos_token] + tokens + [tokenizer.eos_token]
        input_bpe_tokens = [0, 1, 2, 3, 4, 5]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )

    def test_special_tokens_small_tok( self ):
        """Encoding/decoding behavior of the pretrained 90M checkpoint."""
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        assert tok('sam' ).input_ids == [1_384]
        src_text = 'I am a small frog.'
        encoded = tok([src_text] , padding=False , truncation=True )['input_ids']
        decoded = tok.batch_decode(encoded , skip_special_tokens=True , clean_up_tokenization_spaces=True )[0]
        assert src_text != decoded  # I wish it did!
        assert decoded == "i am a small frog ."

    def test_empty_word_small_tok( self ):
        """A trailing '.' token must encode identically alone and in context."""
        tok = BlenderbotSmallTokenizer.from_pretrained('facebook/blenderbot-90M' )
        src_text = 'I am a small frog .'
        src_text_dot = '.'
        encoded = tok(src_text )['input_ids']
        encoded_dot = tok(src_text_dot )['input_ids']
        assert encoded[-1] == encoded_dot[0]
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
# Helper that builds SqueezeBert configs and dummy inputs for the unit tests
# below.
# NOTE(review): this block appears machine-mangled and is left byte-identical:
# the base class `__magic_name__` is undefined, `__init__` declares every
# parameter as `a` (duplicate argument names are a SyntaxError), and the
# method bodies read names (`parent`, `batch_size`, ...) that are never bound.
# The per-attribute assignment order below preserves the intended signature;
# verify against the upstream SqueezeBert test file before fixing.
class A__ ( __magic_name__ ):
# Constructor: stores every argument on `self`; defaults visible in the
# signature (batch_size=13, seq_length=7, vocab_size=99, hidden_size=32, ...).
def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Dict = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Any = q_groups
lowerCAmelCase__ : Any = k_groups
lowerCAmelCase__ : Union[str, Any] = v_groups
lowerCAmelCase__ : int = post_attention_groups
lowerCAmelCase__ : str = intermediate_groups
lowerCAmelCase__ : Union[str, Any] = output_groups
# Builds random input ids, an optional attention mask, labels, and a config.
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
# Builds a SqueezeBertConfig from the tester's hyperparameters.
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
# Checks SqueezeBertModel output shape (with and without attention mask).
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Checks SqueezeBertForMaskedLM logits shape.
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
# Checks SqueezeBertForQuestionAnswering start/end logits shapes.
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
# Checks SqueezeBertForSequenceClassification logits shape.
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
# Checks SqueezeBertForTokenClassification logits shape.
def _lowerCamelCase ( self : Any , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
# Checks SqueezeBertForMultipleChoice logits shape; inputs are expanded
# across the choice dimension first.
def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
# Repackages prepare_config_and_inputs() into the (config, inputs_dict)
# shape expected by ModelTesterMixin.
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
# Main SqueezeBert model test suite.
# NOTE(review): left byte-identical -- the two `__magic_name__` bases are
# undefined (presumably ModelTesterMixin and PipelineTesterMixin, both
# imported above), the repeated `lowercase = ...` class attributes overwrite
# one another (they look like mangled `all_model_classes`,
# `pipeline_model_mapping`, and boolean test flags), and `setUp` references a
# `SqueezeBertModelTester` name that this file defines only as `A__`.
# Verify against the upstream test file before repairing.
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
# All SqueezeBert head classes exercised by the generic mixin tests.
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
# Pipeline-task to model-class mapping for the pipeline mixin.
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
# Mixin feature flags (mangled names; values False / True / False).
lowercase = False
lowercase = True
lowercase = False
# Creates the model tester and a ConfigTester for SqueezeBertConfig.
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 )
# Runs the generic config sanity checks.
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
# One delegating test per head class; each forwards the prepared
# config/inputs to the corresponding tester method above.
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
# Slow test: every checkpoint in the archive list must load.
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
    @slow
    def _lowerCamelCase ( self ):
        """Smoke-test the classification head of the pretrained squeezebert-mnli checkpoint."""
        model = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
        input_ids = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
        output = model(input_ids )[0]
        expected_shape = torch.Size((1, 3) )
        self.assertEqual(output.shape , expected_shape )
        # Reference logits recorded from the released checkpoint.
        expected_output = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
        self.assertTrue(torch.allclose(output , expected_output , atol=1E-4 ) )
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
# Module logger -- the config class below calls `logger.info`, so the mangled
# assignment target is restored to that name.
logger = logging.get_logger(__name__)

# NOTE(review): the archive-map constant's name was mangled; restored to the
# conventional `<MODEL>_PRETRAINED_CONFIG_ARCHIVE_MAP` -- verify importers.
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """ut/deta""": """https://huggingface.co/ut/deta/resolve/main/config.json""",
}
class A__ ( PretrainedConfig ):
    """Configuration for DETA models: backbone choice, transformer dimensions,
    deformable-attention settings, matcher costs, and loss coefficients.

    The mangled source declared every `__init__` parameter as `a` (a
    SyntaxError); names are restored from the attribute assignments in the
    body, which preserve the default-value order of the signature.
    """

    model_type = 'deta'
    attribute_map = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2_048,
        encoder_layers=6,
        encoder_ffn_dim=2_048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1_024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.0_2,
        init_xavier_std=1.0,
        return_intermediate=True,  # NOTE(review): accepted but not stored below -- confirm intent
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.2_5,
        **kwargs,
    ):
        # Default to a ResNet backbone when none is given; accept either a
        # config object or a plain dict (as produced by `to_dict`).
        if backbone_config is None:
            logger.info('`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.' )
            backbone_config = CONFIG_MAPPING['resnet'](out_features=['stage2', 'stage3', 'stage4'] )
        else:
            if isinstance(backbone_config , dict ):
                backbone_model_type = backbone_config.pop('model_type' )
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config )
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError('If two_stage is True, with_box_refine must be True.' )
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder , **kwargs )

    @property
    def num_attention_heads(self):
        """Alias required by `attribute_map`."""
        return self.encoder_attention_heads

    @property
    def hidden_size(self):
        """Alias required by `attribute_map`."""
        return self.d_model

    def to_dict(self):
        """Serialize this config, including the nested backbone config, to a dict."""
        output = copy.deepcopy(self.__dict__ )
        output['backbone_config'] = self.backbone_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    """Dijkstra's two-stack algorithm: evaluate a fully parenthesized infix
    expression of single-digit operands and the operators + - * /.

    Example: "(5 + ((4 * 2) * (2 + 3)))" evaluates to 45. The original body
    read an undefined `equation` name; it is now bound from the parameter.
    Built-in lists are used as the two stacks instead of a custom Stack class.
    """
    equation = SCREAMING_SNAKE_CASE_
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack = []  # holds numbers
    operator_stack = []  # holds pending operator symbols
    for i in equation:
        if i.isdigit():
            # RULE 1: push operand
            operand_stack.append(int(i))
        elif i in operators:
            # RULE 2: push operator
            operator_stack.append(i)
        elif i == ")":
            # RULE 4: pop one operator and two operands, apply, push result.
            # The first operand popped is the right-hand side.
            opr = operator_stack.pop()
            rhs = operand_stack.pop()
            lhs = operand_stack.pop()
            operand_stack.append(operators[opr](lhs, rhs))
        # RULE 3: '(' and whitespace are ignored
    # RULE 5: the final result sits on top of the operand stack
    return operand_stack[-1]
if __name__ == "__main__":
    # Demo: evaluate a sample expression with the two-stack evaluator above.
    # (The original called an undefined `dijkstras_two_stack_algorithm` name;
    # the evaluator in this file is defined as `lowerCAmelCase__`.)
    lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F"""{lowerCamelCase__} = {lowerCAmelCase__(lowerCamelCase__)}""")
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
def cosine_distance(image_embeds, text_embeds):
    """Cosine-similarity matrix between two batches of embeddings.

    Rows of both inputs are L2-normalized, so the matmul yields pairwise
    cosine similarities of shape (len(image_embeds), len(text_embeds)).
    The mangled source declared two identical parameter names (a
    SyntaxError); the safety-checker class below calls this helper as
    `cosine_distance`, so it is defined under that name.
    """
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())


# Backward-compatible alias for the previous (mangled) public name.
lowerCAmelCase__ = cosine_distance
class A__ ( PreTrainedModel ):
    """CLIP-based safety checker that flags images matching NSFW concept
    embeddings (the undefined `__magic_name__` base is restored to the
    `PreTrainedModel` imported above)."""

    config_class = CLIPConfig
    _no_split_modules = ['CLIPEncoderLayer']

    def __init__( self , config: CLIPConfig ):
        """Build the CLIP vision tower, projection, and fixed concept embeddings."""
        super().__init__(config )
        self.vision_model = CLIPVisionModel(config.vision_config )
        self.visual_projection = nn.Linear(config.vision_config.hidden_size , config.projection_dim , bias=False )
        # Non-trainable concept embeddings and per-concept thresholds; real
        # values come from the pretrained checkpoint.
        self.concept_embeds = nn.Parameter(torch.ones(17 , config.projection_dim ) , requires_grad=False )
        self.special_care_embeds = nn.Parameter(torch.ones(3 , config.projection_dim ) , requires_grad=False )
        self.concept_embeds_weights = nn.Parameter(torch.ones(17 ) , requires_grad=False )
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3 ) , requires_grad=False )

    @torch.no_grad()
    def forward( self , clip_input , images ):
        """Score `clip_input` against the concept embeddings.

        Returns the images untouched together with a per-image list of
        booleans indicating whether any NSFW concept fired.
        """
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds ).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds , self.concept_embeds ).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size ):
            result_img = {'special_scores': {}, 'special_care': [], 'concept_scores': {}, 'bad_concepts': []}

            # increase this value to create a stronger `nfsw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0] ) ):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img['special_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img['special_scores'][concept_idx]} )
                    # Once a special-care concept fires, all scores are nudged up.
                    adjustment = 0.0_1

            for concept_idx in range(len(cos_dist[0] ) ):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img['concept_scores'][concept_idx] = round(concept_cos - concept_threshold + adjustment , 3 )
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx )

            result.append(result_img )

        has_nsfw_concepts = [len(res['bad_concepts'] ) > 0 for res in result]
        return images, has_nsfw_concepts

    @torch.no_grad()
    def forward_onnx( self , clip_input: torch.FloatTensor , images: torch.FloatTensor ):
        """Vectorized (export-friendly) variant of `forward`."""
        pooled_output = self.vision_model(clip_input )[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output )

        special_cos_dist = cosine_distance(image_embeds , self.special_care_embeds )
        cos_dist = cosine_distance(image_embeds , self.concept_embeds )

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0 , dim=1 )
        special_adjustment = special_care * 0.0_1
        special_adjustment = special_adjustment.unsqueeze(1 ).expand(-1 , cos_dist.shape[1] )

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0 , dim=1 )

        return images, has_nsfw_concepts
import numpy
class A__ :
def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : Dict = input_arr
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 69 | 1 |
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class A__ ( unittest.TestCase ):
def __init__( self : List[str] , a : str , a : List[Any]=7 , a : Dict=3 , a : Optional[Any]=18 , a : List[Any]=30 , a : Union[str, Any]=400 , a : Tuple=True , a : Optional[int]=32 , a : Dict=True , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = parent
lowerCAmelCase__ : str = batch_size
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Dict = image_size
lowerCAmelCase__ : List[str] = min_resolution
lowerCAmelCase__ : int = max_resolution
lowerCAmelCase__ : Any = do_resize
lowerCAmelCase__ : Tuple = size_divisor
lowerCAmelCase__ : Optional[Any] = do_rescale
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = GLPNImageProcessor if is_vision_available() else None
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = GLPNImageProcessingTester(self )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(a , 'do_resize' ) )
self.assertTrue(hasattr(a , 'size_divisor' ) )
self.assertTrue(hasattr(a , 'resample' ) )
self.assertTrue(hasattr(a , 'do_rescale' ) )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
lowerCAmelCase__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=a )
for image in image_inputs:
self.assertIsInstance(a , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
lowerCAmelCase__ : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , numpify=a )
for image in image_inputs:
self.assertIsInstance(a , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase__ : Optional[int] = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
lowerCAmelCase__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=a , torchify=a )
for image in image_inputs:
self.assertIsInstance(a , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
lowerCAmelCase__ : Tuple = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) | 69 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ):
'''simple docstring'''
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Optional[int] = embed_dim
lowerCAmelCase__ : Tuple = depths
lowerCAmelCase__ : List[str] = num_heads
lowerCAmelCase__ : List[Any] = window_size
lowerCAmelCase__ : Any = mlp_ratio
lowerCAmelCase__ : Optional[Any] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : int = drop_path_rate
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : int = use_absolute_embeddings
lowerCAmelCase__ : List[str] = patch_norm
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : List[Any] = scope
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = encoder_stride
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a )
lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.type_sequence_label_size
lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
lowerCAmelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(a )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = False
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Dict = outputs.attentions
lowerCAmelCase__ : Dict = len(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[int] = config.window_size**2
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : Tuple = len(a )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : Any = 2
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowerCAmelCase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swinv2 has a different seq_length
lowerCAmelCase__ : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
lowerCAmelCase__ : List[str] = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a )
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) | 69 | 1 |
from __future__ import annotations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list[int]:
    """Return the prime factorization of ``SCREAMING_SNAKE_CASE_`` in ascending order.

    Repeated factors appear once per multiplicity, e.g. 12 -> [2, 2, 3].
    Returns an empty list for inputs < 2.

    Fix: the original body read ``n``, ``i`` and ``factors`` although every
    assignment was bound to a throwaway name, so any call raised NameError;
    it also appended the original argument instead of the found factor.
    """
    n = SCREAMING_SNAKE_CASE_
    i = 2
    factors = []
    # Trial division only needs to run up to sqrt(n); whatever remains
    # afterwards is either 1 or a single prime factor.
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
if __name__ == "__main__":
    # When run as a script, execute any doctest examples embedded in this
    # module's docstrings (a no-op unless such examples exist).
    import doctest

    doctest.testmod()
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Return True if the digit sequence has the Project Euler 43 property.

    ``SCREAMING_SNAKE_CASE_`` is an indexable sequence of at least 10 digits
    d0..d9; the property requires d1d2d3 % 2 == 0, d2d3d4 % 3 == 0,
    d3d4d5 % 5 == 0 and the remaining 3-digit windows divisible by 7, 11,
    13 and 17 respectively.

    Fix: the original body read an undefined name ``num`` and enumerated the
    input sequence instead of the ``[7, 11, 13, 17]`` divisor list.
    """
    num = SCREAMING_SNAKE_CASE_
    # Divisibility by 2, 3 and 5 can be decided from single digits / digit sums
    # without building the 3-digit number.
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
    """Solve Project Euler problem 43: sum all 0-``SCREAMING_SNAKE_CASE_``-1
    pandigital numbers whose 3-digit substrings satisfy the divisibility chain.

    Fix: the original called the undefined names ``is_substring_divisible``
    and mapped an undefined converter over the digits (both lost to renaming),
    so it raised NameError.  The predicate is inlined as a private helper so
    this function is self-contained.
    """

    def _is_substring_divisible(num) -> bool:
        # See Project Euler 43: d1d2d3 % 2, d2d3d4 % 3, d3d4d5 % 5, then the
        # remaining windows against 7, 11, 13, 17.
        if num[3] % 2 != 0:
            return False
        if (num[2] + num[3] + num[4]) % 3 != 0:
            return False
        if num[5] % 5 != 0:
            return False
        for i, test in enumerate((7, 11, 13, 17)):
            if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
                return False
        return True

    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(SCREAMING_SNAKE_CASE_))
        if _is_substring_divisible(num)
    )
if __name__ == "__main__":
    # Fix: the solver above was renamed to ``lowerCAmelCase__`` during
    # obfuscation, so calling ``solution()`` here raised NameError; invoke it
    # under its current name.  (Full enumeration of 10! permutations — this
    # takes several seconds.)
    print(F"""{lowerCAmelCase__() = }""")
import enum
import shutil
import sys
# Query the terminal dimensions once at import time.  The obfuscation collapsed
# both unpacked names into one, while the helper functions below read
# ``TERMINAL_WIDTH``; re-derive the real constants alongside the legacy names
# so existing references keep working.
lowerCamelCase__ , lowerCamelCase__ = shutil.get_terminal_size()
TERMINAL_WIDTH, TERMINAL_HEIGHT = shutil.get_terminal_size()
# Map a cursor direction to the final letter of its ANSI CSI escape sequence
# (e.g. ESC [ <n> A moves the cursor up n lines).
lowerCamelCase__ = {"""UP""": """A""", """DOWN""": """B""", """RIGHT""": """C""", """LEFT""": """D"""}
CURSOR_TO_CHAR = lowerCamelCase__
class A__ ( enum.Enum ):
    """Vertical cursor movement directions for the interactive menu.

    Fix: the original declared two members both named ``lowercase`` — reusing
    a member name inside an ``enum.Enum`` raises TypeError at class-creation
    time, so the module could not even be imported.  The members are named
    after the matching ``CURSOR_TO_CHAR`` keys.
    """

    UP = 0
    DOWN = 1
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , end="" ) -> None:
    """Write ``SCREAMING_SNAKE_CASE_`` (plus ``end``) to stdout and flush
    immediately, so terminal updates appear without buffering delay.

    Fix: both parameters were named identically (a SyntaxError) while the body
    read ``end``; the second parameter is now ``end``.  The ``-> Any`` return
    annotation referenced an unimported name and is corrected to ``None``.
    """
    sys.stdout.write(str(SCREAMING_SNAKE_CASE_) + end)
    sys.stdout.flush()
def lowerCAmelCase__ ( content , color , end="" ) -> None:
    """Write ``content`` to stdout wrapped in the ANSI SGR ``color`` code,
    reset the styling afterwards, then append ``end``; flush immediately.

    Fix: all three parameters shared one name (a SyntaxError) while the body
    read ``content`` and ``color``, and it called ``forceWrite`` — a name lost
    to renaming — so the write/flush is inlined here.
    """
    sys.stdout.write(f'\u001b[{color}m{content}\u001b[0m' + end)
    sys.stdout.flush()
def lowerCAmelCase__ ( ) -> None:
    """Return the cursor to column 0 of the current line (carriage return).

    Fix: the original called ``forceWrite``, a name lost to renaming, so the
    write/flush is inlined.  The ``-> Optional[int]`` annotation referenced an
    unimported name and is corrected to ``None``.
    """
    sys.stdout.write('\r')
    sys.stdout.flush()
def lowerCAmelCase__ ( num_lines , direction ) -> None:
    """Move the terminal cursor ``num_lines`` rows/columns in ``direction``
    ("up"/"down"/"left"/"right", case-insensitive) via an ANSI CSI sequence.

    Fix: both parameters shared one name (a SyntaxError) while the body read
    ``num_lines``/``direction``, and the module-level ``CURSOR_TO_CHAR`` table
    was lost to renaming — the direction table is kept locally so this helper
    is self-contained.
    """
    # Final CSI letter per direction: ESC [ <n> A/B/C/D.
    cursor_to_char = {"UP": "A", "DOWN": "B", "RIGHT": "C", "LEFT": "D"}
    sys.stdout.write(f'\033[{num_lines}{cursor_to_char[direction.upper()]}')
    sys.stdout.flush()
def lowerCAmelCase__ ( ) -> None:
    """Blank out the current terminal line and return the cursor to column 0.

    Fix: the original called ``forceWrite``/``reset_cursor`` and read
    ``TERMINAL_WIDTH`` — all names lost to renaming — so the width is queried
    directly and the writes are inlined.
    """
    width = shutil.get_terminal_size()[0]
    sys.stdout.write(' ' * width)
    sys.stdout.flush()
    # Carriage return puts the cursor back at the start of the blanked line.
    sys.stdout.write('\r')
    sys.stdout.flush()
def lowerCAmelCase__ ( ) -> None:
    """Draw a horizontal rule of '-' across the full terminal width, starting
    from column 0 of the current line.

    Fix: the original called ``reset_cursor``/``forceWrite`` and read
    ``TERMINAL_WIDTH`` — all names lost to renaming — so the width is queried
    directly and the writes are inlined.
    """
    sys.stdout.write('\r')
    sys.stdout.flush()
    sys.stdout.write('-' * shutil.get_terminal_size()[0])
    sys.stdout.flush()
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Configure deterministic execution (seeds, cuDNN determinism) so the
# hard-coded expected image slices in the tests below are reproducible.
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
    """Fast CPU tests for ``ConsistencyModelPipeline`` using tiny test UNets.

    NOTE(review): the four successive ``lowercase`` class attributes shadow
    one another, so only the final ``frozenset`` survives; upstream these were
    the distinct ``pipeline_class`` / ``params`` / ``batch_params`` /
    ``required_optional_params`` attributes expected by the tester mixin.
    Likewise, the bodies below call ``self.get_dummy_components`` /
    ``self.get_dummy_inputs`` / ``self.dummy_*_unet`` — the pre-obfuscation
    method names — while every method here is currently named
    ``_lowerCamelCase``.  Confirm the intended names before relying on them.
    """

    lowercase = ConsistencyModelPipeline
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    lowercase = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )

    @property
    def _lowerCamelCase ( self : int ):
        """Tiny unconditional UNet checkpoint used by the fast tests."""
        # Fix: the fetched model was bound to a throwaway name while the
        # return statement read ``unet``; bind the name actually returned.
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet

    @property
    def _lowerCamelCase ( self : List[str] ):
        """Tiny class-conditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet

    def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ):
        """Build the pipeline components dict; ``a`` selects the
        class-conditional UNet.

        Fix: the original body tested the undefined name ``class_cond`` — the
        flag is the parameter ``a``.
        """
        if a:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.002 , sigma_max=80.0 , )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def _lowerCamelCase ( self : int , a : Optional[int] , seed : Any=0 ):
        """Return standard pipeline call kwargs for device ``a``.

        Fix: the signature declared ``a`` twice (a SyntaxError); the second
        parameter is the RNG seed, named ``seed`` here, and the generator is
        bound under the name the inputs dict reads.
        """
        if str(a ).startswith('mps' ):
            # MPS has no per-device Generator; seed the global RNG instead.
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=a ).manual_seed(seed )
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def _lowerCamelCase ( self : Optional[Any] ):
        """Multistep sampling on CPU reproduces the recorded image slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        # Fix: ``disable`` was passed the undefined name ``a``; upstream
        # passes None (progress bar decided by library verbosity).
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _lowerCamelCase ( self : List[str] ):
        """Class-conditional multistep sampling reproduces the recorded slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        # Fix: the conditioning label was assigned to a throwaway name; it
        # belongs in the pipeline inputs.
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _lowerCamelCase ( self : Any ):
        """Single-step sampling (num_inference_steps=1) reproduces the recorded slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        # Fix: the one-step overrides were assigned to throwaway names; they
        # belong in the pipeline inputs.
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3

    def _lowerCamelCase ( self : Optional[Any] ):
        """Class-conditional single-step sampling reproduces the recorded slice."""
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True )
        pipe = ConsistencyModelPipeline(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )
        inputs = self.get_dummy_inputs(device )
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs ).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
lowerCAmelCase__ : List[Any] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
lowerCAmelCase__ : Tuple = latents
return inputs
def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
'''simple docstring'''
if type(a ) == str:
lowerCAmelCase__ : str = torch.device(a )
lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[Any] = self.get_inputs()
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_inputs()
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 69 | 1 |
import json
import os
import torch
from diffusers import UNetaDModel
os.makedirs("""hub/hopper-medium-v2/unet/hor32""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/unet/hor128""", exist_ok=True)
os.makedirs("""hub/hopper-medium-v2/value_function""", exist_ok=True)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
if hor == 128:
lowerCAmelCase__ : int = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCAmelCase__ : Dict = (32, 128, 256)
lowerCAmelCase__ : Tuple = ('UpResnetBlock1D', 'UpResnetBlock1D')
elif hor == 32:
lowerCAmelCase__ : Any = ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D')
lowerCAmelCase__ : Union[str, Any] = (32, 64, 128, 256)
lowerCAmelCase__ : int = ('UpResnetBlock1D', 'UpResnetBlock1D', 'UpResnetBlock1D')
lowerCAmelCase__ : Dict = torch.load(F'''/Users/bglickenhaus/Documents/diffuser/temporal_unet-hopper-mediumv2-hor{hor}.torch''' )
lowerCAmelCase__ : Any = model.state_dict()
lowerCAmelCase__ : int = {
'down_block_types': down_block_types,
'block_out_channels': block_out_channels,
'up_block_types': up_block_types,
'layers_per_block': 1,
'use_timestep_embedding': True,
'out_block_type': 'OutConv1DBlock',
'norm_num_groups': 8,
'downsample_each_block': False,
'in_channels': 14,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'flip_sin_to_cos': False,
'freq_shift': 1,
'sample_size': 65_536,
'mid_block_type': 'MidResTemporalBlock1D',
'act_fn': 'mish',
}
lowerCAmelCase__ : Union[str, Any] = UNetaDModel(**SCREAMING_SNAKE_CASE_ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCAmelCase__ : List[str] = dict(zip(model.state_dict().keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , F'''hub/hopper-medium-v2/unet/hor{hor}/diffusion_pytorch_model.bin''' )
with open(F'''hub/hopper-medium-v2/unet/hor{hor}/config.json''' , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> Optional[Any]:
lowerCAmelCase__ : Dict = {
'in_channels': 14,
'down_block_types': ('DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D', 'DownResnetBlock1D'),
'up_block_types': (),
'out_block_type': 'ValueFunction',
'mid_block_type': 'ValueFunctionMidBlock1D',
'block_out_channels': (32, 64, 128, 256),
'layers_per_block': 1,
'downsample_each_block': True,
'sample_size': 65_536,
'out_channels': 14,
'extra_in_channels': 0,
'time_embedding_type': 'positional',
'use_timestep_embedding': True,
'flip_sin_to_cos': False,
'freq_shift': 1,
'norm_num_groups': 8,
'act_fn': 'mish',
}
lowerCAmelCase__ : int = torch.load('/Users/bglickenhaus/Documents/diffuser/value_function-hopper-mediumv2-hor32.torch' )
lowerCAmelCase__ : Dict = model
lowerCAmelCase__ : int = UNetaDModel(**SCREAMING_SNAKE_CASE_ )
print(F'''length of state dict: {len(state_dict.keys() )}''' )
print(F'''length of value function dict: {len(hf_value_function.state_dict().keys() )}''' )
lowerCAmelCase__ : int = dict(zip(state_dict.keys() , hf_value_function.state_dict().keys() ) )
for k, v in mapping.items():
lowerCAmelCase__ : Tuple = state_dict.pop(SCREAMING_SNAKE_CASE_ )
hf_value_function.load_state_dict(SCREAMING_SNAKE_CASE_ )
torch.save(hf_value_function.state_dict() , 'hub/hopper-medium-v2/value_function/diffusion_pytorch_model.bin' )
with open('hub/hopper-medium-v2/value_function/config.json' , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
unet(32)
# unet(128)
value_function() | 69 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __magic_name__ ):
def __init__( self : int , a : List[str] , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = params
lowerCAmelCase__ : Union[str, Any] = np.array(a )
lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , a : List[str] ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.lengths )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size
lowerCAmelCase__ : Optional[int] = self.lengths > max_len
logger.info(f'''Splitting {sum(a )} too long sequences.''' )
def divide_chunks(a : List[str] , a : Tuple ):
return [l[i : i + n] for i in range(0 , len(a ) , a )]
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Union[str, Any] = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ : Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ : Dict = np.insert(a , 0 , a )
if sub_s[-1] != sep_id:
lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a )
assert len(a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a )
new_tok_ids.extend(a )
new_lengths.extend([len(a ) for l in sub_seqs] )
lowerCAmelCase__ : str = np.array(a )
lowerCAmelCase__ : Optional[Any] = np.array(a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = len(self )
lowerCAmelCase__ : List[Any] = self.lengths > 11
lowerCAmelCase__ : Dict = self.token_ids[indices]
lowerCAmelCase__ : Tuple = self.lengths[indices]
lowerCAmelCase__ : Any = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : str = len(self )
lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ : List[str] = self.token_ids[indices]
lowerCAmelCase__ : Optional[Any] = self.lengths[indices]
lowerCAmelCase__ : Union[str, Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self : int , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [t[0] for t in batch]
lowerCAmelCase__ : List[str] = [t[1] for t in batch]
assert len(a ) == len(a )
# Max for paddings
lowerCAmelCase__ : List[str] = max(a )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
assert len(tk_ ) == len(a )
assert all(len(a ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs)
return tk_t, lg_t | 69 | 1 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , a : Callable , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[dict] = None , a : Optional[int] = None , **a : str , ):
'''simple docstring'''
super().__init__(
features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowerCAmelCase__ : int = Generator(
cache_dir=a , features=a , generator=a , gen_kwargs=a , **a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ : List[Any] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Dict = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowerCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
split='train' , verification_mode=a , in_memory=self.keep_in_memory )
return dataset | 69 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 69 | 1 |
import argparse
import torch
from torch import nn
from transformers import MaMaaaConfig, MaMaaaForConditionalGeneration
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Union[str, Any] = [
'encoder.version',
'decoder.version',
'model.encoder.version',
'model.decoder.version',
'decoder.output_projection.weight',
'_float_tensor',
'encoder.embed_positions._float_tensor',
'decoder.embed_positions._float_tensor',
]
for k in ignore_keys:
state_dict.pop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = emb.weight.shape
lowerCAmelCase__ : Optional[int] = nn.Linear(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , bias=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = emb.weight.data
return lin_layer
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ : Optional[Any] = torch.load(SCREAMING_SNAKE_CASE_ , map_location='cpu' )
lowerCAmelCase__ : Any = mam_aaa['args'] or mam_aaa['cfg']['model']
lowerCAmelCase__ : Optional[Any] = mam_aaa['model']
remove_ignore_keys_(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = state_dict['encoder.embed_tokens.weight'].shape[0]
lowerCAmelCase__ : Optional[int] = MaMaaaConfig(
vocab_size=SCREAMING_SNAKE_CASE_ , max_position_embeddings=1_024 , encoder_layers=args.encoder_layers , decoder_layers=args.decoder_layers , encoder_attention_heads=args.encoder_attention_heads , decoder_attention_heads=args.decoder_attention_heads , encoder_ffn_dim=args.encoder_ffn_embed_dim , decoder_ffn_dim=args.decoder_ffn_embed_dim , d_model=args.encoder_embed_dim , encoder_layerdrop=args.encoder_layerdrop , decoder_layerdrop=args.decoder_layerdrop , dropout=args.dropout , attention_dropout=args.attention_dropout , activation_dropout=args.activation_dropout , activation_function='relu' , )
lowerCAmelCase__ : Tuple = state_dict['decoder.embed_tokens.weight']
lowerCAmelCase__ : List[Any] = MaMaaaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
model.model.load_state_dict(SCREAMING_SNAKE_CASE_ , strict=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument("""fairseq_path""", type=str, help="""path to a model.pt on local filesystem.""")
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
lowerCamelCase__ = parser.parse_args()
lowerCamelCase__ = convert_fairseq_mamaaa_checkpoint_from_disk(args.fairseq_pathß)
model.save_pretrained(args.pytorch_dump_folder_path) | 69 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = RemBertTokenizer
def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[Any] = keep_accents
lowerCAmelCase__ : Optional[Any] = vocab_file
lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 | 1 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Test helper that builds a tiny Swin-v2 config plus dummy inputs and
    runs output-shape checks for the Swin-v2 model classes.

    NOTE(review): the incoming block was machine-mangled — every parameter was
    named ``a`` (a duplicate-argument SyntaxError), constructor arguments were
    bound to a throwaway local instead of ``self``, and all methods shared one
    name (shadowing each other).  Names below are restored from the surviving
    right-hand sides and from the call sites visible in this file
    (``self.get_config()``, ``self.model_tester.prepare_config_and_inputs()``,
    ``create_and_check_model``, ``prepare_config_and_inputs_for_common``).
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        # Bind every constructor argument onto the instance; the other methods
        # read these attributes (self.batch_size, self.depths, ...).
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return ``(config, pixel_values, labels)`` with random pixel values."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a tiny ``SwinvaConfig`` from the tester attributes."""
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            # Fix: the original passed ``path_norm=`` (typo), which the config
            # absorbed as an unknown kwarg instead of setting ``patch_norm``.
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Run the base ``SwinvaModel`` forward pass and verify the hidden-state shape."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # After len(depths) - 1 patch merges, the sequence length shrinks 4x
        # per merge and the embedding dimension doubles per merge.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check logits shape for masked-image modeling, incl. a 1-channel (greyscale) run."""
        # Name restored per HF convention; no call site is visible in this chunk.
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head returns ``(batch, num_labels)`` logits."""
        # Name restored per HF convention; no call site is visible in this chunk.
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return ``(config, inputs_dict)`` in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
    def _lowerCamelCase ( self : Tuple ):
        """Set up the per-test helpers: a model tester and a generic config tester."""
        # NOTE(review): both assignments were mangled onto a throwaway local;
        # presumably they were ``self.model_tester`` / ``self.config_tester``,
        # which the other test methods read.  ``SwinvaModelTester`` and ``a``
        # (likely the config class) are not defined in this chunk — verify.
        lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
        lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def _lowerCamelCase ( self : List[str] ):
        """Smoke-test the base model: build tiny config/inputs, check output shapes."""
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        # NOTE(review): ``*a`` below is a mangled reference — presumably the
        # config/inputs tuple produced on the line above; confirm upstream.
        self.model_tester.create_and_check_model(*a )
    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def _lowerCamelCase ( self : Any ):
        """Intentionally skipped: crashed with a CUDA misaligned-address error on PyTorch 2.0.0."""
        pass
    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def _lowerCamelCase ( self : Dict ):
        """Intentionally skipped: Swin-v2 consumes pixel values, not ``inputs_embeds``."""
        pass
    def _lowerCamelCase ( self : Tuple ):
        """Check each model class exposes an input-embedding nn.Module and a
        Linear (or None) output embedding."""
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            # NOTE(review): ``a``, ``model`` and ``x`` below are mangled names —
            # ``a`` is presumably the config unpacked above, ``model`` the
            # instance built here, ``x`` the output embedding; verify upstream.
            lowerCAmelCase__ : int = model_class(a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(a )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = False
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Dict = outputs.attentions
lowerCAmelCase__ : Dict = len(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[int] = config.window_size**2
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : Tuple = len(a )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : Any = 2
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowerCAmelCase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swinv2 has a different seq_length
lowerCAmelCase__ : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
lowerCAmelCase__ : List[str] = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration test: Swinv2 image classification on a real checkpoint.

    NOTE(review): locals are assigned to the placeholder ``lowerCAmelCase__``
    while later statements read ``model`` / ``outputs`` / ``a`` that are
    never bound here — mechanical renaming damage; confirm against the
    upstream test before running.
    """

    @cached_property
    def _lowerCamelCase ( self : Union[str, Any] ):
        """Image processor for the tiny Swinv2 checkpoint (None without vision deps)."""
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )

    @slow
    def _lowerCamelCase ( self : Dict ):
        """Classify the COCO cats fixture and compare logits against references."""
        lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            a )
        lowerCAmelCase__ : Dict = self.default_image_processor
        lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**a )
        # verify the logits
        lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __magic_name__ ):
    """Dataset input stream backed by the packaged ``Generator`` builder.

    Wraps a user callable (plus its ``gen_kwargs``) into the ``Generator``
    dataset builder; ``read`` produces a streaming or a fully prepared
    map-style ``train`` split.

    NOTE(review): the builder is assigned to the placeholder
    ``lowerCAmelCase__`` but read back as ``self.builder``, ``read``
    returns an unbound ``dataset``, and several keyword arguments pass the
    first positional parameter ``a`` — mechanical renaming damage; confirm
    against the upstream reader before running.
    """

    def __init__( self : List[Any] , a : Callable , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[dict] = None , a : Optional[int] = None , **a : str , ):
        """Forward the common reader options to the base class, then build
        the packaged ``Generator`` builder from the callable and kwargs.
        NOTE(review): declares parameter ``a`` repeatedly (invalid Python)."""
        super().__init__(
            features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
        lowerCAmelCase__ : int = Generator(
            cache_dir=a , features=a , generator=a , gen_kwargs=a , **a , )

    def _lowerCamelCase ( self : Optional[Any] ):
        """Return the ``train`` split: streaming if requested, otherwise
        download/prepare the builder and materialize the dataset."""
        if self.streaming:
            lowerCAmelCase__ : List[Any] = self.builder.as_streaming_dataset(split='train' )
        # Build regular (map-style) dataset
        else:
            lowerCAmelCase__ : Any = None
            lowerCAmelCase__ : int = None
            lowerCAmelCase__ : List[Any] = None
            lowerCAmelCase__ : Dict = None
            self.builder.download_and_prepare(
                download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
            lowerCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
                split='train' , verification_mode=a , in_memory=self.keep_in_memory )
        return dataset
from unittest import TestCase
from datasets import Dataset
from minhash_deduplication import deduplicate_dataset, make_duplicate_clusters
def lowerCAmelCase__ ( ) -> Dataset:
    """Build a tiny three-file fixture dataset for the dedup tests.

    Rows 0 and 1 ('a ' repeated 20 and 30 times) are near-duplicates;
    row 2 ('b ' repeated) is distinct.

    Fixes the original, which built the dict but then called
    ``Dataset.from_dict`` on an undefined name and returned another
    undefined name (guaranteed NameError); the return annotation also
    referenced ``Optional[Any]`` without an import.

    Returns:
        A ``datasets.Dataset`` with columns repo_name/path/content.
    """
    data = {
        'repo_name': ['test_repo1', 'test_repo2', 'test_repo3'],
        'path': ['test_1.py', 'test_2.py', 'unit_test.py'],
        'content': ['a ' * 20, 'a ' * 30, 'b ' * 7],
    }
    return Dataset.from_dict(data)
class A__ ( __magic_name__ ):
    """Tests for MinHash-based near-duplicate clustering and removal.

    NOTE(review): results are assigned to the placeholder
    ``lowerCAmelCase__`` while assertions read ``duplicate_clusters`` /
    ``a`` that are never bound — mechanical renaming damage; confirm
    against the upstream test before running. One invalid multi-target
    annotation was removed; all other tokens are unchanged.
    """

    def _lowerCamelCase ( self : Optional[Any] ):
        """make_duplicate_clusters at threshold 0.85 groups the two
        near-identical fixture files into one 2-element cluster."""
        lowerCAmelCase__ : List[Any] = get_dataset()
        lowerCAmelCase__ : List[str] = make_duplicate_clusters(a , 0.8_5 )
        self.assertEqual(len(duplicate_clusters[0] ) , 2 )

    def _lowerCamelCase ( self : int ):
        """deduplicate_dataset keeps 2 rows and flags the cluster metadata
        (copies count and extreme marker)."""
        lowerCAmelCase__ : List[Any] = get_dataset()
        lowerCAmelCase__ , lowerCAmelCase__ = deduplicate_dataset(a )
        self.assertEqual(len(a ) , 2 )
        print(a )
        self.assertEqual(duplicate_clusters[0][0]['copies'] , 2 )
        self.assertEqual(duplicate_clusters[0][0]['is_extreme'] , a )
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
    """Audio feature extractor producing log-mel spectrogram patches.

    Converts raw mono waveforms into truncated log-mel spectrograms
    (``audio_values``) plus a patch-level attention mask (``audio_mask``).

    NOTE(review): mechanical renaming damage throughout — locals are
    assigned to the placeholder ``lowerCAmelCase__`` while later code
    reads ``log_spec`` / ``raw_speech`` / ``a`` etc., and ``__init__``
    declares the parameter ``a`` repeatedly (invalid Python). Confirm
    against the upstream extractor before running.
    """

    # Names of the model input tensors this extractor produces.
    lowercase = ['audio_values', 'audio_mask']

    def __init__( self : Dict , a : Dict=2_048 , a : Optional[Any]=1 , a : List[Any]=[16, 16] , a : Dict=128 , a : List[str]=44_100 , a : Union[str, Any]=86 , a : Optional[Any]=2_048 , a : List[Any]=0.0 , **a : Tuple , ):
        """Set up spectrogram geometry and precompute the mel filter bank
        (Slaney norm/scale, up to 22050 Hz)."""
        super().__init__(
            feature_size=a , sampling_rate=a , padding_value=a , **a , )
        lowerCAmelCase__ : Optional[Any] = spectrogram_length
        lowerCAmelCase__ : str = num_channels
        lowerCAmelCase__ : Tuple = patch_size
        # number of patches along the frequency axis
        lowerCAmelCase__ : Optional[int] = feature_size // self.patch_size[1]
        lowerCAmelCase__ : Union[str, Any] = n_fft
        lowerCAmelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase__ : int = sampling_rate
        lowerCAmelCase__ : Union[str, Any] = padding_value
        lowerCAmelCase__ : Dict = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T

    def _lowerCamelCase ( self : Optional[int] , a : np.array ):
        """Compute a dB-scaled log-mel spectrogram (Hann window, power 2),
        drop the last frame, shift by -20 dB and rescale into (-1, 1]."""
        lowerCAmelCase__ : Optional[int] = spectrogram(
            a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
        lowerCAmelCase__ : Any = log_spec[:, :-1]
        lowerCAmelCase__ : Dict = log_spec - 2_0.0
        lowerCAmelCase__ : Tuple = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : str , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : int , ):
        """Featurize one or more mono waveforms into padded log-mel patches.

        Validates the sampling rate, normalizes the batch layout, extracts
        per-waveform spectrograms truncated to ``spectrogram_length``, then
        pads all of them to the longest patch count and (optionally) builds
        a per-patch attention mask. Returns a ``BatchFeature``.
        NOTE(review): declares parameter ``a`` repeatedly (invalid Python).
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        lowerCAmelCase__ : Dict = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase__ : List[Any] = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            lowerCAmelCase__ : Optional[Any] = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase__ : str = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase__ : Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase__ : int = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , a ):
            lowerCAmelCase__ : Optional[Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase__ : Optional[Any] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1s over real patches, 0s over padding, per example
            lowerCAmelCase__ : Any = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase__ : List[Any] = np.array(a ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase__ : int = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        lowerCAmelCase__ : Dict = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase__ : Optional[Any] = padded_audio_features * self.padding_value
        for i in range(len(a ) ):
            lowerCAmelCase__ : Tuple = audio_features[i]
            lowerCAmelCase__ : List[str] = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase__ : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            lowerCAmelCase__ : Any = {'audio_values': padded_audio_features}
        lowerCAmelCase__ : Any = BatchFeature(data=a , tensor_type=a )
        return encoded_inputs
import json
import os
import shutil
import tempfile
import unittest
from transformers import BatchEncoding, CanineTokenizer
from transformers.testing_utils import require_tokenizers, require_torch
from transformers.tokenization_utils import AddedToken
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
    """Tokenizer tests for CANINE (character-level, codepoint ids).

    NOTE(review): mechanical renaming damage throughout — locals are
    assigned to the placeholder ``lowerCAmelCase__`` while later
    statements read ``tokenizer`` / ``targets`` / ``a`` etc. that are
    never bound here. Confirm against the upstream CANINE tokenizer test
    before running. One invalid multi-target annotation was removed; all
    other tokens are unchanged.
    """

    lowercase = CanineTokenizer
    lowercase = False

    def _lowerCamelCase ( self : Optional[Any] ):
        """Save a fresh CanineTokenizer into the temp dir used by the common tests."""
        super().setUp()
        lowerCAmelCase__ : Dict = CanineTokenizer()
        tokenizer.save_pretrained(self.tmpdirname )

    @cached_property
    def _lowerCamelCase ( self : Optional[int] ):
        """Pretrained google/canine-s tokenizer used by the integration tests."""
        return CanineTokenizer.from_pretrained('google/canine-s' )

    def _lowerCamelCase ( self : List[str] , **a : List[str] ):
        """Reload the tokenizer saved in setUp, with model_max_length set to 1024."""
        lowerCAmelCase__ : List[Any] = self.tokenizer_class.from_pretrained(self.tmpdirname , **a )
        lowerCAmelCase__ : Optional[Any] = 1_024
        return tokenizer

    @require_torch
    def _lowerCamelCase ( self : str ):
        """Batch encoding with padding yields the expected codepoint ids
        (incl. BOS 57344 / EOS 57345) and a (2, 39) padded shape."""
        lowerCAmelCase__ : Union[str, Any] = self.canine_tokenizer
        lowerCAmelCase__ : int = ['Life is like a box of chocolates.', 'You never know what you\'re gonna get.']
        # fmt: off
        lowerCAmelCase__ : Dict = [57_344, 76, 105, 102, 101, 32, 105, 115, 32, 108, 105, 107, 101, 32, 97, 32, 98, 111, 120, 32, 111, 102, 32, 99, 104, 111, 99, 111, 108, 97, 116, 101, 115, 46, 57_345, 0, 0, 0, 0]
        # fmt: on
        lowerCAmelCase__ : int = tokenizer(a , padding=a , return_tensors='pt' )
        self.assertIsInstance(a , a )
        lowerCAmelCase__ : int = list(batch.input_ids.numpy()[0] )
        self.assertListEqual(a , a )
        self.assertEqual((2, 39) , batch.input_ids.shape )
        self.assertEqual((2, 39) , batch.attention_mask.shape )

    @require_torch
    def _lowerCamelCase ( self : Union[str, Any] ):
        """Encoding returns input_ids, attention_mask and token_type_ids."""
        lowerCAmelCase__ : List[Any] = self.canine_tokenizer
        lowerCAmelCase__ : Dict = ['Once there was a man.', 'He wrote a test in HuggingFace Tranformers.']
        lowerCAmelCase__ : Optional[Any] = tokenizer(a , padding=a , return_tensors='pt' )
        # check if input_ids, attention_mask and token_type_ids are returned
        self.assertIn('input_ids' , a )
        self.assertIn('attention_mask' , a )
        self.assertIn('token_type_ids' , a )

    @require_torch
    def _lowerCamelCase ( self : List[Any] ):
        """Target texts padded/truncated to max_length=32."""
        lowerCAmelCase__ : str = self.canine_tokenizer
        lowerCAmelCase__ : int = [
            'What\'s the weater?',
            'It\'s about 25 degrees.',
        ]
        lowerCAmelCase__ : Dict = tokenizer(
            text_target=a , max_length=32 , padding='max_length' , truncation=a , return_tensors='pt' )
        self.assertEqual(32 , targets['input_ids'].shape[1] )

    def _lowerCamelCase ( self : List[str] ):
        """Save/reload round-trips preserve encodings, added special tokens
        and model_max_length (including a from_pretrained override)."""
        lowerCAmelCase__ : int = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                self.assertNotEqual(tokenizer.model_max_length , 42 )
        # Now let's start the test
        lowerCAmelCase__ : Optional[int] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase__ : List[Any] = tempfile.mkdtemp()
                lowerCAmelCase__ : Dict = ' He is very happy, UNwant\u00E9d,running'
                lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
                tokenizer.save_pretrained(a )
                lowerCAmelCase__ : Union[str, Any] = tokenizer.__class__.from_pretrained(a )
                lowerCAmelCase__ : Any = after_tokenizer.encode(a , add_special_tokens=a )
                self.assertListEqual(a , a )
                shutil.rmtree(a )
        lowerCAmelCase__ : List[Any] = self.get_tokenizers(model_max_length=42 )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # Isolate this from the other tests because we save additional tokens/etc
                lowerCAmelCase__ : Dict = tempfile.mkdtemp()
                lowerCAmelCase__ : int = ' He is very happy, UNwant\u00E9d,running'
                lowerCAmelCase__ : List[str] = tokenizer.additional_special_tokens
                # We can add a new special token for Canine as follows:
                lowerCAmelCase__ : str = chr(0Xe_007 )
                additional_special_tokens.append(a )
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
                lowerCAmelCase__ : int = tokenizer.encode(a , add_special_tokens=a )
                tokenizer.save_pretrained(a )
                lowerCAmelCase__ : str = tokenizer.__class__.from_pretrained(a )
                lowerCAmelCase__ : Optional[Any] = after_tokenizer.encode(a , add_special_tokens=a )
                self.assertListEqual(a , a )
                self.assertIn(a , after_tokenizer.additional_special_tokens )
                self.assertEqual(after_tokenizer.model_max_length , 42 )
                lowerCAmelCase__ : Optional[Any] = tokenizer.__class__.from_pretrained(a , model_max_length=43 )
                self.assertEqual(tokenizer.model_max_length , 43 )
                shutil.rmtree(a )

    def _lowerCamelCase ( self : Any ):
        """Adding a CLS special token encodes to one id and is skipped on decode."""
        lowerCAmelCase__ : Any = self.get_tokenizers(do_lower_case=a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowerCAmelCase__ , lowerCAmelCase__ = self.get_clean_sequence(a )
                # a special token for Canine can be defined as follows:
                lowerCAmelCase__ : Tuple = 0Xe_005
                lowerCAmelCase__ : Tuple = chr(a )
                tokenizer.add_special_tokens({'cls_token': special_token} )
                lowerCAmelCase__ : Optional[int] = tokenizer.encode(a , add_special_tokens=a )
                self.assertEqual(len(a ) , 1 )
                lowerCAmelCase__ : int = tokenizer.decode(ids + encoded_special_token , clean_up_tokenization_spaces=a )
                lowerCAmelCase__ : List[str] = tokenizer.encode(a , add_special_tokens=a )
                lowerCAmelCase__ : Any = tokenizer.encode(a , add_special_tokens=a )
                lowerCAmelCase__ : int = tokenizer.encode(a , add_special_tokens=a )
                self.assertEqual(a , input_encoded + special_token_id )
                lowerCAmelCase__ : Optional[Any] = tokenizer.decode(a , skip_special_tokens=a )
                self.assertTrue(special_token not in decoded )

    def _lowerCamelCase ( self : Dict ):
        """Tokens added via add_tokens and add_special_tokens both tokenize
        to a single token equal to themselves."""
        lowerCAmelCase__ : int = self.get_tokenizers(do_lower_case=a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowerCAmelCase__ : List[str] = chr(0Xe_005 )
                lowerCAmelCase__ : int = chr(0Xe_006 )
                # `add_tokens` method stores special tokens only in `tokenizer.unique_no_split_tokens`. (in tokenization_utils.py)
                tokenizer.add_tokens([SPECIAL_TOKEN_1] , special_tokens=a )
                # `add_special_tokens` method stores special tokens in `tokenizer.additional_special_tokens`,
                # which also occur in `tokenizer.all_special_tokens`. (in tokenization_utils_base.py)
                tokenizer.add_special_tokens({'additional_special_tokens': [SPECIAL_TOKEN_2]} )
                lowerCAmelCase__ : Any = tokenizer.tokenize(a )
                lowerCAmelCase__ : int = tokenizer.tokenize(a )
                self.assertEqual(len(a ) , 1 )
                self.assertEqual(len(a ) , 1 )
                self.assertEqual(token_a[0] , a )
                self.assertEqual(token_a[0] , a )

    @require_tokenizers
    def _lowerCamelCase ( self : List[Any] ):
        """An AddedToken special token survives a save/from_pretrained round-trip."""
        lowerCAmelCase__ : Union[str, Any] = self.get_tokenizers(do_lower_case=a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                # a special token for Canine can be defined as follows:
                lowerCAmelCase__ : int = 0Xe_006
                lowerCAmelCase__ : Union[str, Any] = chr(a )
                lowerCAmelCase__ : Optional[Any] = AddedToken(a , lstrip=a )
                tokenizer.add_special_tokens({'additional_special_tokens': [new_token]} )
                with tempfile.TemporaryDirectory() as tmp_dir_name:
                    tokenizer.save_pretrained(a )
                    tokenizer.from_pretrained(a )

    def _lowerCamelCase ( self : Any ):
        """Tokenizers pick up additional_special_tokens edited directly in the
        saved special_tokens_map.json / tokenizer_config.json, and accept an
        override passed to from_pretrained."""
        lowerCAmelCase__ : Any = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(a )
                with open(os.path.join(a , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
                    lowerCAmelCase__ : Optional[int] = json.load(a )
                with open(os.path.join(a , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
                    lowerCAmelCase__ : str = json.load(a )
                # a special token for Canine can be defined as follows:
                lowerCAmelCase__ : int = 0Xe_006
                lowerCAmelCase__ : int = chr(a )
                lowerCAmelCase__ : Tuple = [new_token_a]
                lowerCAmelCase__ : Optional[Any] = [new_token_a]
                with open(os.path.join(a , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(a , a )
                with open(os.path.join(a , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
                    json.dump(a , a )
                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                lowerCAmelCase__ : List[Any] = tokenizer_class.from_pretrained(a , extra_ids=0 )
                self.assertIn(a , tokenizer_without_change_in_init.additional_special_tokens )
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids([new_token_a] ) ) , )
                lowerCAmelCase__ : str = 0Xe_007
                lowerCAmelCase__ : Union[str, Any] = chr(a )
                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                lowerCAmelCase__ : Optional[int] = [AddedToken(a , lstrip=a )]
                lowerCAmelCase__ : Dict = tokenizer_class.from_pretrained(
                    a , additional_special_tokens=a , extra_ids=0 )
                self.assertIn(a , tokenizer.additional_special_tokens )
                # self.assertIn(new_token_2,tokenizer.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    [new_token_a] , tokenizer.convert_ids_to_tokens(tokenizer.convert_tokens_to_ids([new_token_a] ) ) )

    @require_tokenizers
    def _lowerCamelCase ( self : Dict ):
        """Decoding an encoded string reproduces it (with or without spaces
        between special tokens)."""
        lowerCAmelCase__ : str = self.get_tokenizers(do_lower_case=a )
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowerCAmelCase__ : List[Any] = 'hello world'
                if self.space_between_special_tokens:
                    lowerCAmelCase__ : Tuple = '[CLS] hello world [SEP]'
                else:
                    lowerCAmelCase__ : str = input
                lowerCAmelCase__ : List[str] = tokenizer.encode(a , add_special_tokens=a )
                lowerCAmelCase__ : Optional[Any] = tokenizer.decode(a , spaces_between_special_tokens=self.space_between_special_tokens )
                self.assertIn(a , [output, output.lower()] )

    def _lowerCamelCase ( self : str ):
        """Special-token attribute/id setters stay consistent, including the
        additional_special_tokens(_ids) pair."""
        lowerCAmelCase__ : Optional[Any] = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
                lowerCAmelCase__ : Optional[Any] = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]
                lowerCAmelCase__ : Union[str, Any] = 'a'
                lowerCAmelCase__ : Optional[int] = ord(a )
                for attr in attributes_list:
                    setattr(a , attr + '_id' , a )
                    self.assertEqual(getattr(a , a ) , a )
                    self.assertEqual(getattr(a , attr + '_id' ) , a )
                    setattr(a , attr + '_id' , a )
                    self.assertEqual(getattr(a , a ) , a )
                    self.assertEqual(getattr(a , attr + '_id' ) , a )
                setattr(a , 'additional_special_tokens_ids' , [] )
                self.assertListEqual(getattr(a , 'additional_special_tokens' ) , [] )
                self.assertListEqual(getattr(a , 'additional_special_tokens_ids' ) , [] )
                lowerCAmelCase__ : Optional[int] = 0Xe_006
                lowerCAmelCase__ : Union[str, Any] = chr(a )
                setattr(a , 'additional_special_tokens_ids' , [additional_special_token_id] )
                self.assertListEqual(getattr(a , 'additional_special_tokens' ) , [additional_special_token] )
                self.assertListEqual(getattr(a , 'additional_special_tokens_ids' ) , [additional_special_token_id] )

    # The following common tests do not apply to a vocabulary-free,
    # character-level tokenizer and are intentionally disabled.
    def _lowerCamelCase ( self : Union[str, Any] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : List[Any] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : Optional[Any] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : Optional[int] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : Tuple ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Disabled common test (not applicable to CANINE)."""
        pass

    def _lowerCamelCase ( self : List[str] ):
        """Disabled common test (not applicable to CANINE)."""
        pass
import unittest
from transformers import DonutProcessor
lowerCamelCase__ = """naver-clova-ix/donut-base"""
class A__ ( unittest.TestCase ):
    """Round-trip test for DonutProcessor's tag-sequence -> JSON decoding.

    Fixes in this revision:
    - setUp previously called ``from_pretrained(a)`` with ``a`` undefined
      and stored the processor in a local, while the test reads
      ``self.processor`` — now loaded from the module-level checkpoint
      constant and bound to the instance.
    - the test previously assigned both the expected dict and the tag
      sequence to the same placeholder local (the dict was clobbered) and
      passed undefined ``a`` to the decoder — now uses distinct locals.
    """

    def _lowerCamelCase ( self : Dict ):
        """setUp: load the processor under test from the hub checkpoint."""
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__ )

    def _lowerCamelCase ( self : Optional[int] ):
        """Decoding a Donut tag sequence yields the expected nested dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        # NOTE(review): ``tokenajson`` mirrors the upstream ``token2json``
        # helper — confirm the method name on DonutProcessor.
        actual_json = self.processor.tokenajson(sequence )
        self.assertDictEqual(actual_json , expected_json )
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel
from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImgaImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = ShapEImgaImgPipeline
lowercase = ['image']
lowercase = ['image']
lowercase = [
'num_images_per_prompt',
'num_inference_steps',
'generator',
'latents',
'guidance_scale',
'frame_size',
'output_type',
'return_dict',
]
lowercase = False
    # NOTE(review): these four properties all carry the same mangled name
    # ``_lowerCamelCase``, so in Python only the last definition survives;
    # later code also reads ``self.time_input_dim`` /
    # ``self.text_embedder_hidden_size`` etc., which no longer exist under
    # those names. Confirm against the upstream ShapE test.
    @property
    def _lowerCamelCase ( self : str ):
        """Constant dummy size (32)."""
        return 32

    @property
    def _lowerCamelCase ( self : str ):
        """Constant dummy size (32)."""
        return 32

    @property
    def _lowerCamelCase ( self : str ):
        """Four times ``self.time_input_dim`` (attribute name not defined here — see note above)."""
        return self.time_input_dim * 4

    @property
    def _lowerCamelCase ( self : Union[str, Any] ):
        """Constant dummy size (8)."""
        return 8
    @property
    def _lowerCamelCase ( self : Dict ):
        """Tiny seeded CLIPVisionModel used as the dummy image encoder.
        NOTE(review): reads ``self.text_embedder_hidden_size``, not defined
        under that name here (mangled property names)."""
        torch.manual_seed(0 )
        lowerCAmelCase__ : Dict = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size , image_size=64 , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , num_attention_heads=4 , num_channels=3 , num_hidden_layers=5 , patch_size=1 , )
        lowerCAmelCase__ : int = CLIPVisionModel(a )
        return model

    @property
    def _lowerCamelCase ( self : Dict ):
        """CLIPImageProcessor configured for 224px inputs with CLIP mean/std."""
        lowerCAmelCase__ : Tuple = CLIPImageProcessor(
            crop_size=224 , do_center_crop=a , do_normalize=a , do_resize=a , image_mean=[0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3] , image_std=[0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1] , resample=3 , size=224 , )
        return image_processor
    @property
    def _lowerCamelCase ( self : Optional[Any] ):
        """Tiny seeded PriorTransformer used as the dummy prior.
        NOTE(review): locals/returns use mangled names (``model`` is never
        bound; kwargs pass undefined ``a``) — confirm against upstream."""
        torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[Any] = {
            'num_attention_heads': 2,
            'attention_head_dim': 16,
            'embedding_dim': self.time_input_dim,
            'num_embeddings': 32,
            'embedding_proj_dim': self.text_embedder_hidden_size,
            'time_embed_dim': self.time_embed_dim,
            'num_layers': 1,
            'clip_embed_dim': self.time_input_dim * 2,
            'additional_embeddings': 0,
            'time_embed_act_fn': 'gelu',
            'norm_in_type': 'layer',
            'embedding_proj_norm_type': 'layer',
            'encoder_hid_proj_type': None,
            'added_emb_type': None,
        }
        lowerCAmelCase__ : Optional[int] = PriorTransformer(**a )
        return model

    @property
    def _lowerCamelCase ( self : Dict ):
        """Tiny seeded ShapERenderer used as the dummy renderer."""
        torch.manual_seed(0 )
        lowerCAmelCase__ : Optional[int] = {
            'param_shapes': (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            'd_latent': self.time_input_dim,
            'd_hidden': self.renderer_dim,
            'n_output': 12,
            'background': (
                0.1,
                0.1,
                0.1,
            ),
        }
        lowerCAmelCase__ : Dict = ShapERenderer(**a )
        return model
def _lowerCamelCase ( self : Union[str, Any] ):
    """Assemble the dummy component dict the fast tests feed to the pipeline.

    BUG FIX: every component was bound to a throwaway name and then read
    back under a different, undefined name (NameError).  The scheduler's
    ``use_karras_sigmas``/``clip_sample`` previously received the undefined
    name ``a``; ``True`` matches the upstream Shap-E test configuration.

    NOTE(review): ``self.dummy_prior`` etc. are not defined under those
    names elsewhere in this mangled file — confirm the property names.
    """
    prior = self.dummy_prior
    image_encoder = self.dummy_image_encoder
    image_processor = self.dummy_image_processor
    renderer = self.dummy_renderer
    scheduler = HeunDiscreteScheduler(
        beta_schedule='exp' , num_train_timesteps=1_024 , prediction_type='sample' , use_karras_sigmas=True , clip_sample=True , clip_sample_range=1.0 , )
    components = {
        'prior': prior,
        'image_encoder': image_encoder,
        'image_processor': image_processor,
        'renderer': renderer,
        'scheduler': scheduler,
    }
    return components
def _lowerCamelCase ( self : List[str] , a : Optional[int] , seed : Optional[int]=0 ):
    """Build the keyword arguments for one pipeline call on device ``a``.

    BUG FIX: the original declared two parameters both named ``a``
    (a SyntaxError); the second is the RNG seed.  Locals were also bound to
    a throwaway name and read back as ``input_image``/``generator``.
    """
    input_image = floats_tensor((1, 3, 64, 64) , rng=random.Random(seed ) ).to(a )
    if str(a ).startswith('mps' ):
        # MPS does not support device-bound generators
        generator = torch.manual_seed(seed )
    else:
        generator = torch.Generator(device=a ).manual_seed(seed )
    inputs = {
        'image': input_image,
        'generator': generator,
        'num_inference_steps': 1,
        'frame_size': 32,
        'output_type': 'np',
    }
    return inputs
def _lowerCamelCase ( self : str ):
    """Run the pipeline end-to-end on CPU and check output shape and a pixel slice.

    BUG FIX: locals were bound to a throwaway name and read back as
    ``pipe``/``output``/``image`` etc. (NameError); restored.
    """
    device = 'cpu'
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(device )
    # NOTE(review): original passed the undefined name `a`; diffusers tests
    # conventionally use disable=None here.
    pipe.set_progress_bar_config(disable=None )
    output = pipe(**self.get_dummy_inputs(device ) )
    image = output.images[0]
    image_slice = image[0, -3:, -3:, -1]
    assert image.shape == (20, 32, 32, 3)
    expected_slice = np.array(
        [
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
            0.0_0_0_3_9_2_1_6,
        ] )
    assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
def _lowerCamelCase ( self : Optional[Any] ):
    '''Smoke-test that batched inference (batch sizes 1 and 2) runs consistently.'''
    self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _lowerCamelCase ( self : Union[str, Any] ):
    """Check that batched inference matches single-sample inference.

    BUG FIX: the two flags were bound to a throwaway name and the call then
    passed the undefined name ``a``; restored.  Exact max-difference checks
    are only enabled on CPU, where results are deterministic.
    """
    test_max_difference = torch_device == 'cpu'
    relax_max_difference = True
    self._test_inference_batch_single_identical(
        batch_size=2 , test_max_difference=test_max_difference , relax_max_difference=relax_max_difference , )
def _lowerCamelCase ( self : Optional[Any] ):
    """Check that num_images_per_prompt scales the output batch dimension.

    BUG FIX: locals were bound to a throwaway name and read back as
    ``pipe``/``inputs`` etc.; restored per the upstream Shap-E test.
    """
    components = self.get_dummy_components()
    pipe = self.pipeline_class(**components )
    pipe = pipe.to(torch_device )
    pipe.set_progress_bar_config(disable=None )
    batch_size = 1
    num_images_per_prompt = 2
    inputs = self.get_dummy_inputs(torch_device )
    for key in inputs.keys():
        if key in self.batch_params:
            # replicate batchable inputs to the requested batch size
            inputs[key] = batch_size * [inputs[key]]
    images = pipe(**inputs , num_images_per_prompt=num_images_per_prompt )[0]
    assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    """Slow GPU integration tests for the Shap-E image-to-image pipeline.

    NOTE(review): both methods carry the mangled name ``_lowerCamelCase``,
    so the second definition shadows the first (the tearDown hook is never
    registered under its real name) — restore the original method names.
    """

    def _lowerCamelCase ( self : int ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def _lowerCamelCase ( self : Tuple ):
        """Compare pipeline output on the corgi image against a stored reference.

        BUG FIX: locals were bound to a throwaway name and read back as
        ``pipe`` etc.; restored.
        """
        input_image = load_image(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main' '/shap_e/corgi.png' )
        expected_images = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
            '/shap_e/test_shap_e_img2img_out.npy' )
        pipe = ShapEImgaImgPipeline.from_pretrained('openai/shap-e-img2img' )
        pipe = pipe.to(torch_device )
        pipe.set_progress_bar_config(disable=None )
        generator = torch.Generator(device=torch_device ).manual_seed(0 )
        images = pipe(
            input_image , generator=generator , guidance_scale=3.0 , num_inference_steps=64 , frame_size=64 , output_type='np' , ).images[0]
        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images , expected_images )
from numpy import exp, pi, sqrt


def lowerCAmelCase__ ( x: float , mu: float = 0.0 , sigma: float = 1.0 ) -> float:
    """Return the Gaussian (normal) probability density at ``x``.

    BUG FIX: the original declared all three parameters with the same name
    (a SyntaxError) and annotated the return type as ``int`` although the
    density is a float.

    :param x: point at which to evaluate the density
    :param mu: mean of the distribution (default 0.0)
    :param sigma: standard deviation (default 1.0); must be non-zero
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
lowerCamelCase__ = False  # NOTE(review): module-level flag; it is not read anywhere in this chunk — confirm original name/purpose before removing
@skip_mps
class A__ ( __magic_name__ , __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Fast (CPU-sized) tests for StableDiffusionAttendAndExcitePipeline.

    NOTE(review): this class is machine-mangled — all six class attributes
    share the name ``lowercase`` (later bindings shadow earlier ones), locals
    are bound to ``lowerCAmelCase__`` and then read under other names, and
    ``get_dummy_inputs`` declares two parameters both named ``a`` (a
    SyntaxError).  Code is kept byte-identical; only documentation added.
    """
    lowercase = StableDiffusionAttendAndExcitePipeline
    lowercase = False
    lowercase = TEXT_TO_IMAGE_PARAMS
    lowercase = TEXT_TO_IMAGE_BATCH_PARAMS.union({'token_indices'} )
    lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
    lowercase = TEXT_TO_IMAGE_IMAGE_PARAMS
    @classmethod
    def _lowerCamelCase ( cls : List[Any] ):
        '''Enable deterministic torch algorithms for the whole test class.'''
        super().setUpClass()
        # NOTE(review): `a` is undefined here — presumably True; confirm.
        torch.use_deterministic_algorithms(a )
    @classmethod
    def _lowerCamelCase ( cls : List[str] ):
        '''Restore torch's default (non-deterministic) algorithm setting.'''
        super().tearDownClass()
        # NOTE(review): `a` is undefined here — presumably False; confirm.
        torch.use_deterministic_algorithms(a )
    def _lowerCamelCase ( self : Tuple ):
        '''Build tiny seeded UNet/VAE/text-encoder components for fast tests.'''
        torch.manual_seed(0 )
        lowerCAmelCase__ : List[str] = UNetaDConditionModel(
            block_out_channels=(32, 64) , layers_per_block=1 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=a , )
        lowerCAmelCase__ : List[Any] = DDIMScheduler(
            beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule='scaled_linear' , clip_sample=a , set_alpha_to_one=a , )
        torch.manual_seed(0 )
        lowerCAmelCase__ : Any = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        lowerCAmelCase__ : Union[str, Any] = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_000 , hidden_act='gelu' , projection_dim=512 , )
        lowerCAmelCase__ : Dict = CLIPTextModel(a )
        lowerCAmelCase__ : Optional[Any] = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        # NOTE(review): the dict below reads unet/scheduler/vae/... which were
        # never bound under those names above — restore the local names.
        lowerCAmelCase__ : str = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'safety_checker': None,
            'feature_extractor': None,
        }
        return components
    def _lowerCamelCase ( self : Optional[Any] , a : int , a : Tuple=0 ):
        '''Build per-call kwargs (prompt, token indices, generator) for device ``a``; second param is the seed.'''
        if str(a ).startswith('mps' ):
            lowerCAmelCase__ : Any = torch.manual_seed(a )
        else:
            lowerCAmelCase__ : Optional[int] = torch.Generator(device=a ).manual_seed(a )
        lowerCAmelCase__ : Dict = {
            'prompt': 'a cat and a frog',
            'token_indices': [2, 5],
            'generator': generator,
            'num_inference_steps': 1,
            'guidance_scale': 6.0,
            'output_type': 'numpy',
            'max_iter_to_alter': 2,
            'thresholds': {0: 0.7},
        }
        return inputs
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Run the pipeline on CPU and compare a pixel slice against stored values.'''
        lowerCAmelCase__ : str = 'cpu'
        lowerCAmelCase__ : str = self.get_dummy_components()
        lowerCAmelCase__ : Any = self.pipeline_class(**a )
        pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : List[Any] = pipe(**a ).images
        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        self.assertEqual(image.shape , (1, 64, 64, 3) )
        lowerCAmelCase__ : Dict = np.array(
            [0.6_3_9_0_5_3_6_4, 0.6_2_8_9_7_3_0_7, 0.4_8_5_9_9_0_1_7, 0.5_1_3_3_6_2_4, 0.5_5_5_0_0_4_8, 0.4_5_7_6_9_5_1_6, 0.5_0_3_2_6_9_7_3, 0.5_0_2_3_1_3_9, 0.4_5_3_8_4_4_9_6] )
        lowerCAmelCase__ : int = np.abs(image_slice.flatten() - expected_slice ).max()
        self.assertLessEqual(a , 1E-3 )
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Override tolerance for the CPU-offload forward-pass check.'''
        super().test_cpu_offload_forward_pass(expected_max_diff=5E-4 )
    def _lowerCamelCase ( self : Optional[int] ):
        '''Smoke-test batched inference for batch sizes 1 and 2.'''
        self._test_inference_batch_consistent(batch_sizes=[1, 2] )
    def _lowerCamelCase ( self : Dict ):
        '''Check batched vs single-sample outputs agree within tolerance.'''
        self._test_inference_batch_single_identical(batch_size=2 , expected_max_diff=7E-4 )
    def _lowerCamelCase ( self : str ):
        '''Override tolerance for the dict/tuple-output equivalence check.'''
        super().test_dict_tuple_outputs_equivalent(expected_max_difference=3E-3 )
    def _lowerCamelCase ( self : Optional[int] ):
        '''Override tolerance for the pt/np/pil output equivalence check.'''
        super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5E-4 )
    def _lowerCamelCase ( self : int ):
        '''Override tolerance for the save/load round-trip check.'''
        super().test_save_load_local(expected_max_difference=5E-4 )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Override tolerance for the optional-components save/load check.'''
        super().test_save_load_optional_components(expected_max_difference=4E-4 )
@require_torch_gpu
@slow
class A__ ( unittest.TestCase ):
    """Slow GPU integration test for Attend-and-Excite against SD v1.4.

    NOTE(review): machine-mangled — locals are bound to ``lowerCAmelCase__``
    and then read back as ``pipe``/``prompt`` etc., every method shares the
    name ``_lowerCamelCase`` (later definitions shadow earlier ones), and
    ``torch.use_deterministic_algorithms(a )`` passes an undefined name.
    Code kept byte-identical; documentation only.
    """
    @classmethod
    def _lowerCamelCase ( cls : Any ):
        '''Enable deterministic torch algorithms for the whole test class.'''
        super().setUpClass()
        torch.use_deterministic_algorithms(a )
    @classmethod
    def _lowerCamelCase ( cls : Union[str, Any] ):
        '''Restore the default algorithm setting.'''
        super().tearDownClass()
        torch.use_deterministic_algorithms(a )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Free GPU memory after each test.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _lowerCamelCase ( self : int ):
        '''Generate the elephant image and compare against a stored reference.'''
        lowerCAmelCase__ : Dict = torch.manual_seed(51 )
        lowerCAmelCase__ : Dict = StableDiffusionAttendAndExcitePipeline.from_pretrained(
            'CompVis/stable-diffusion-v1-4' , safety_checker=a , torch_dtype=torch.floataa )
        pipe.to('cuda' )
        lowerCAmelCase__ : Tuple = 'a painting of an elephant with glasses'
        lowerCAmelCase__ : Union[str, Any] = [5, 7]
        lowerCAmelCase__ : List[Any] = pipe(
            prompt=a , token_indices=a , guidance_scale=7.5 , generator=a , num_inference_steps=5 , max_iter_to_alter=5 , output_type='numpy' , ).images[0]
        lowerCAmelCase__ : Optional[int] = load_numpy(
            'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
        assert np.abs((expected_image - image).max() ) < 5E-1
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
    """Unit tests for XLMTokenizer (BPE vocab/merges round-trip and special tokens).

    NOTE(review): machine-mangled — locals are bound to ``lowerCAmelCase__``
    and then read back under other names (``json.dumps(a )``,
    ``'\\n'.join(a )``, ``dict(zip(a , ...))`` all reference undefined
    names).  Code kept byte-identical; documentation only.
    """
    lowercase = XLMTokenizer
    lowercase = False
    def _lowerCamelCase ( self : int ):
        '''Write a tiny BPE vocab and merges file into the test tmpdir.'''
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCAmelCase__ : List[str] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) )
        lowerCAmelCase__ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(a ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(a ) )
    def _lowerCamelCase ( self : List[str] , a : Dict ):
        '''Return (input_text, expected_output_text) for the common round-trip test.'''
        lowerCAmelCase__ : List[Any] = 'lower newer'
        lowerCAmelCase__ : Any = 'lower newer'
        return input_text, output_text
    def _lowerCamelCase ( self : int ):
        '''Tokenize "lower" with the tiny vocab and check tokens and ids.'''
        lowerCAmelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
        lowerCAmelCase__ : Optional[int] = 'lower'
        lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
        lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        lowerCAmelCase__ : Tuple = tokens + ['<unk>']
        lowerCAmelCase__ : Optional[int] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @slow
    def _lowerCamelCase ( self : Any ):
        '''Check special-token layout produced by the pretrained xlm-mlm-en-2048 tokenizer.'''
        lowerCAmelCase__ : List[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        lowerCAmelCase__ : Any = tokenizer.encode('sequence builders' , add_special_tokens=a )
        lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
        lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(a )
        lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a , a )
        # <s> (id 0) opens and </s> (id 1) closes each segment
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import is_accelerate_available, is_torch_available, is_transformers_available, is_xformers_available
from . import BaseDiffusersCLICommand
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> "EnvironmentCommand":
    """CLI factory registered via ``set_defaults(func=...)``.

    The parsed-args namespace is accepted for signature compatibility with
    argparse but not used.  BUG FIX: the return annotation previously said
    ``List[str]`` although an EnvironmentCommand instance is returned.
    """
    return EnvironmentCommand()
class A__ ( __magic_name__ ):
    """`diffusers-cli env` command: collect and print environment information."""

    @staticmethod
    def _lowerCamelCase ( a : ArgumentParser ):
        """Register the ``env`` sub-command on parser ``a``.

        BUG FIX: the body previously read the undefined name ``parser`` and
        set ``func`` to an undefined name; it now uses the parameter and the
        module-level factory (``lowerCAmelCase__``) defined above.
        """
        download_parser = a.add_parser('env' )
        download_parser.set_defaults(func=lowerCAmelCase__ )

    def _lowerCamelCase ( self : Optional[Any] ):
        """Gather versions of torch/transformers/accelerate/xformers, print a
        GitHub-issue-ready report, and return the info dict.

        BUG FIX: every version string was bound to a throwaway name and then
        read back under a different, undefined name; restored.
        """
        hub_version = huggingface_hub.__version__
        pt_version = 'not installed'
        pt_cuda_available = 'NA'
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()
        transformers_version = 'not installed'
        if is_transformers_available():
            import transformers

            transformers_version = transformers.__version__
        accelerate_version = 'not installed'
        if is_accelerate_available():
            import accelerate

            accelerate_version = accelerate.__version__
        xformers_version = 'not installed'
        if is_xformers_available():
            import xformers

            xformers_version = xformers.__version__
        info = {
            '`diffusers` version': version,
            'Platform': platform.platform(),
            'Python version': platform.python_version(),
            'PyTorch version (GPU?)': f'''{pt_version} ({pt_cuda_available})''',
            'Huggingface_hub version': hub_version,
            'Transformers version': transformers_version,
            'Accelerate version': accelerate_version,
            'xFormers version': xformers_version,
            'Using GPU in script?': '<fill in>',
            'Using distributed or parallel set-up in script?': '<fill in>',
        }
        print('\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n' )
        # NOTE(review): `format_dict` is not defined under that name in this
        # mangled file (the formatter below is named _lowerCamelCase) —
        # restore the method name upstream.
        print(self.format_dict(info ) )
        return info

    @staticmethod
    def _lowerCamelCase ( a : int ):
        """Render mapping ``a`` as newline-separated ``- key: value`` lines.

        BUG FIX: the body previously iterated the undefined name ``d``.
        """
        return "\n".join([f'''- {prop}: {val}''' for prop, val in a.items()] ) + "\n"
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
    """Resize a PIL image to a multiple of 32 and convert it to a [-1, 1]
    float tensor of shape (1, C, H, W).

    BUG FIX: the original used annotated tuple-unpacking targets
    (``a , b : str = ...`` — a SyntaxError), referenced the non-existent
    ``np.floataa`` (should be ``np.float32``), and annotated the return
    type as ``List[str]``.
    """
    w , h = SCREAMING_SNAKE_CASE_.size
    w , h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = SCREAMING_SNAKE_CASE_.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )  # HWC -> NCHW
    image = torch.from_numpy(image )
    return 2.0 * image - 1.0  # map [0, 1] -> [-1, 1]
class A__ ( __magic_name__ ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet).

    BUG FIX: the original ``__init__`` and ``__call__`` declared several
    parameters with the same name (a SyntaxError), and locals were bound to
    a throwaway name and then read back under different, undefined names.
    Parameter names and local bindings restored.
    """

    def __init__( self : List[str] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        '''Register the VQ-VAE, UNet and scheduler on the pipeline.'''
        super().__init__()
        self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )

    @torch.no_grad()
    def __call__( self : int , image : Union[torch.Tensor, PIL.Image.Image] = None , batch_size : Optional[int] = 1 , num_inference_steps : Optional[int] = 100 , eta : Optional[float] = 0.0 , generator : Optional[Union[torch.Generator, List[torch.Generator]]] = None , output_type : Optional[str] = "pil" , return_dict : bool = True , ):
        """Run super-resolution denoising on ``image``.

        :param image: low-resolution input (PIL image or NCHW tensor)
        :param batch_size: overridden by the input's batch dimension
        :param num_inference_steps: number of scheduler steps
        :param eta: DDIM eta (ignored by schedulers without ``eta``)
        :param generator: RNG for the initial latents
        :param output_type: 'pil' or raw numpy
        :param return_dict: return ImagePipelineOutput instead of a tuple
        """
        if isinstance(image , PIL.Image.Image ):
            batch_size = 1
        elif isinstance(image , torch.Tensor ):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image )}''' )
        if isinstance(image , PIL.Image.Image ):
            # module-level preprocess helper (mangled name in this file)
            image = lowerCAmelCase__(image )
        height , width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters() ).dtype
        latents = randn_tensor(latents_shape , generator=generator , device=self.device , dtype=latents_dtype )
        image = image.to(device=self.device , dtype=latents_dtype )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps , device=self.device )
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor ):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image] , dim=1 )
            latents_input = self.scheduler.scale_model_input(latents_input , t )
            # predict the noise residual
            noise_pred = self.unet(latents_input , t ).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred , t , latents , **extra_kwargs ).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents ).sample
        image = torch.clamp(image , -1.0 , 1.0 )
        image = image / 2 + 0.5  # [-1, 1] -> [0, 1]
        image = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image )
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)  # module logger
# NOTE(review): rebinds the same mangled name as the logger above (shadowing);
# this is the pretrained-config archive map — restore distinct names upstream.
lowerCamelCase__ = {
    """EleutherAI/gpt-neox-20b""": """https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/config.json""",
    # See all GPTNeoX models at https://huggingface.co/models?filter=gpt_neox
}
class A__ ( __magic_name__ ):
    """Configuration for GPT-NeoX models.

    BUG FIXES: the original ``__init__`` declared every parameter with the
    same name (a SyntaxError) and assigned the values to throwaway locals
    instead of ``self`` attributes, so ``self._rope_scaling_validation()``
    and the head-divisibility check raised AttributeError.  Parameter names
    and attribute assignments restored to match the public GPT-NeoX config;
    the "divisble" typo in the error message is also fixed.
    """

    lowercase = 'gpt_neox'

    def __init__( self : str , vocab_size : int=50_432 , hidden_size : int=6_144 , num_hidden_layers : int=44 , num_attention_heads : int=64 , intermediate_size : int=24_576 , hidden_act : str="gelu" , rotary_pct : float=0.2_5 , rotary_emb_base : int=10_000 , attention_dropout : float=0.0 , hidden_dropout : float=0.0 , classifier_dropout : float=0.1 , max_position_embeddings : int=2_048 , initializer_range : float=0.0_2 , layer_norm_eps : float=1E-5 , use_cache : bool=True , bos_token_id : int=0 , eos_token_id : int=2 , tie_word_embeddings : bool=False , use_parallel_residual : bool=True , rope_scaling : Optional[dict]=None , **kwargs : Tuple , ):
        '''Store hyper-parameters, validate rope_scaling and the head count.'''
        super().__init__(bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.rotary_pct = rotary_pct
        self.rotary_emb_base = rotary_emb_base
        self.attention_dropout = attention_dropout
        self.hidden_dropout = hidden_dropout
        self.classifier_dropout = classifier_dropout
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.use_cache = use_cache
        self.tie_word_embeddings = tie_word_embeddings
        self.use_parallel_residual = use_parallel_residual
        self.rope_scaling = rope_scaling
        self._rope_scaling_validation()
        if self.hidden_size % self.num_attention_heads != 0:
            raise ValueError(
                'The hidden size is not divisible by the number of attention heads! Make sure to update them!' )

    def _rope_scaling_validation( self : Dict ):
        '''Validate the optional `rope_scaling` dict ({"type": ..., "factor": ...}).'''
        if self.rope_scaling is None:
            return
        if not isinstance(self.rope_scaling , dict ) or len(self.rope_scaling ) != 2:
            raise ValueError(
                '`rope_scaling` must be a dictionary with with two fields, `name` and `factor`, '
                f'''got {self.rope_scaling}''' )
        rope_scaling_type = self.rope_scaling.get('type' , None )
        rope_scaling_factor = self.rope_scaling.get('factor' , None )
        if rope_scaling_type is None or rope_scaling_type not in ["linear", "dynamic"]:
            raise ValueError(
                f'''`rope_scaling`\'s name field must be one of [\'linear\', \'dynamic\'], got {rope_scaling_type}''' )
        if rope_scaling_factor is None or not isinstance(rope_scaling_factor , float ) or rope_scaling_factor <= 1.0:
            raise ValueError(f'''`rope_scaling`\'s factor field must be an float > 1, got {rope_scaling_factor}''' )
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
    """Torch dataset over CNN/DailyMail story files in a directory.

    BUG FIXES: the original ``__init__`` declared two parameters both named
    ``a`` (a SyntaxError), never initialized ``self.documents`` (it bound a
    throwaway local instead), joined the directory with itself instead of
    the filename, and ``__getitem__`` used annotated tuple-unpacking (also a
    SyntaxError).  All restored.
    """

    def __init__( self : Union[str, Any] , a : str="" , b : str="train" ):
        '''Collect absolute paths of all story files under directory ``a``; ``b`` is the split name (unused here).'''
        assert os.path.isdir(a )
        self.documents = []
        story_filenames_list = os.listdir(a )
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue  # skip summary files, keep only stories
            path_to_story = os.path.join(a , story_filename )
            if not os.path.isfile(path_to_story ):
                continue
            self.documents.append(path_to_story )

    def __len__( self : Any ):
        '''Number of story documents found.'''
        return len(self.documents )

    def __getitem__( self : Dict , a : Any ):
        '''Read document ``a`` and return (name, story_lines, summary_lines).'''
        document_path = self.documents[a]
        document_name = document_path.split('/' )[-1]
        with open(document_path , encoding='utf-8' ) as source:
            raw_story = source.read()
        # NOTE(review): process_story is not defined under that name in this
        # mangled file — restore the helper's name upstream.
        story_lines , summary_lines = process_story(raw_story )
        return document_name, story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
    """Split a raw CNN/DM story into (story_lines, summary_lines).

    Lines before the first '@highlight' are the story; the remaining
    non-'@highlight' lines are the summary.  BUG FIX: the original bound the
    cleaned lines to a throwaway name and then read the undefined name
    ``nonempty_lines``, and applied the missing-period helper to the whole
    raw string instead of each line.
    """
    nonempty_lines = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in SCREAMING_SNAKE_CASE_.split('\n' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t : not t.startswith('@highlight' ) , lines ) )
    return story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Append a period to a line unless it already ends in a terminator or is
    an '@highlight' marker.

    BUG FIX: the body previously read the undefined names ``line`` and
    ``END_TOKENS`` instead of the parameter and the local list.
    """
    end_tokens = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    if SCREAMING_SNAKE_CASE_.startswith('@highlight' ):
        return SCREAMING_SNAKE_CASE_
    # guard the [-1] index so an empty line is simply terminated with "."
    if SCREAMING_SNAKE_CASE_ and SCREAMING_SNAKE_CASE_[-1] in end_tokens:
        return SCREAMING_SNAKE_CASE_
    return SCREAMING_SNAKE_CASE_ + "."
def lowerCAmelCase__ ( sequence , block_size , pad_token_id ) -> list:
    """Truncate ``sequence`` to ``block_size`` or pad it (in place) with
    ``pad_token_id`` up to that length, returning the result.

    BUG FIX: the original declared all three parameters with the same name
    (a SyntaxError) and annotated the return type as ``str``.
    Note: when padding, the input list is mutated via ``extend``.
    """
    if len(sequence ) > block_size:
        return sequence[:block_size]
    sequence.extend([pad_token_id] * (block_size - len(sequence )) )
    return sequence
def lowerCAmelCase__ ( sequence , pad_token_id ) -> "torch.Tensor":
    """Return an attention mask: 1 everywhere except 0 at pad positions.

    BUG FIXES: the original declared both parameters with the same name
    (a SyntaxError), and computed the pad positions into a throwaway local
    without ever applying them to the mask, so all-ones was returned.
    """
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCAmelCase__ ( story_lines , summary_lines , tokenizer ) -> tuple:
    """Tokenize story and summary lines and flatten each into one id list.

    BUG FIXES: the original declared all three parameters with the same name
    (a SyntaxError) and encoded the wrong variable instead of each line.
    Returns (story_token_ids, summary_token_ids).
    """
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCAmelCase__ ( batch , separator_token_id ) -> "torch.Tensor":
    """Build alternating 0/1 token-type ids per sequence: the id flips each
    time ``separator_token_id`` is seen.

    BUG FIXES: the original declared both parameters with the same name
    (a SyntaxError) and appended the wrong list to the batch accumulator.
    Note: tokens before the first separator get id 1 (since -1 % 2 == 1),
    preserving the original algorithm.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
# Imports
import numpy as np
class A__ :
    """Compute common spectral vegetation indices from per-band pixel matrices.

    Bands (red, green, blue, red_edge, nir) are held as numpy arrays and each
    index method returns the element-wise index value.

    NOTE(review): this class is machine-mangled — every method is named
    ``_lowerCamelCase`` (later definitions shadow earlier ones), so the
    dispatch table's references (``self.arvaa``, ``self.ndvi``, ...) and the
    ``self.set_matricies`` call in ``__init__`` fail at runtime, and
    ``__init__``/``set_matricies``/the dispatcher declare several parameters
    with the same name ``a`` (SyntaxErrors).  Code kept byte-identical;
    documentation only.
    """
    def __init__( self : List[str] , a : Any=None , a : Union[str, Any]=None , a : int=None , a : Tuple=None , a : str=None ):
        '''Store the provided band matrices via set_matricies.'''
        self.set_matricies(red=a , green=a , blue=a , red_edge=a , nir=a )
    def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any]=None , a : int=None , a : int=None , a : str=None , a : List[Any]=None ):
        '''Update any subset of the band matrices; returns True.'''
        if red is not None:
            lowerCAmelCase__ : int = red
        if green is not None:
            lowerCAmelCase__ : Optional[int] = green
        if blue is not None:
            lowerCAmelCase__ : Optional[Any] = blue
        if red_edge is not None:
            lowerCAmelCase__ : int = red_edge
        if nir is not None:
            lowerCAmelCase__ : List[str] = nir
        return True
    def _lowerCamelCase ( self : Optional[Any] , a : Optional[int]="" , a : Optional[int]=None , a : List[Any]=None , a : List[Any]=None , a : Optional[int]=None , a : Dict=None ):
        '''Dispatch an index computation by name, optionally updating bands first; prints and returns False for unknown names.'''
        self.set_matricies(red=a , green=a , blue=a , red_edge=a , nir=a )
        lowerCAmelCase__ : Any = {
            'ARVI2': self.arvaa,
            'CCCI': self.ccci,
            'CVI': self.cvi,
            'GLI': self.gli,
            'NDVI': self.ndvi,
            'BNDVI': self.bndvi,
            'redEdgeNDVI': self.red_edge_ndvi,
            'GNDVI': self.gndvi,
            'GBNDVI': self.gbndvi,
            'GRNDVI': self.grndvi,
            'RBNDVI': self.rbndvi,
            'PNDVI': self.pndvi,
            'ATSAVI': self.atsavi,
            'BWDRVI': self.bwdrvi,
            'CIgreen': self.ci_green,
            'CIrededge': self.ci_rededge,
            'CI': self.ci,
            'CTVI': self.ctvi,
            'GDVI': self.gdvi,
            'EVI': self.evi,
            'GEMI': self.gemi,
            'GOSAVI': self.gosavi,
            'GSAVI': self.gsavi,
            'Hue': self.hue,
            'IVI': self.ivi,
            'IPVI': self.ipvi,
            'I': self.i,
            'RVI': self.rvi,
            'MRVI': self.mrvi,
            'MSAVI': self.m_savi,
            'NormG': self.norm_g,
            'NormNIR': self.norm_nir,
            'NormR': self.norm_r,
            'NGRDI': self.ngrdi,
            'RI': self.ri,
            'S': self.s,
            'IF': self._if,
            'DVI': self.dvi,
            'TVI': self.tvi,
            'NDRE': self.ndre,
        }
        try:
            return funcs[index]()
        except KeyError:
            print('Index not in the list!' )
            return False
    def _lowerCamelCase ( self : Optional[Any] ):
        '''ARVI2 — Atmospherically Resistant Vegetation Index 2.'''
        return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
    def _lowerCamelCase ( self : Tuple ):
        '''CCCI — Canopy Chlorophyll Content Index.'''
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''CVI — Chlorophyll Vegetation Index.'''
        return self.nir * (self.red / (self.green**2))
    def _lowerCamelCase ( self : int ):
        '''GLI — Green Leaf Index.'''
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def _lowerCamelCase ( self : int ):
        '''NDVI — Normalized Difference Vegetation Index.'''
        return (self.nir - self.red) / (self.nir + self.red)
    def _lowerCamelCase ( self : Optional[int] ):
        '''BNDVI — Blue-band Normalized Difference Vegetation Index.'''
        return (self.nir - self.blue) / (self.nir + self.blue)
    def _lowerCamelCase ( self : int ):
        '''redEdgeNDVI — Red-edge Normalized Difference Vegetation Index.'''
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def _lowerCamelCase ( self : List[Any] ):
        '''GNDVI — Green-band Normalized Difference Vegetation Index.'''
        return (self.nir - self.green) / (self.nir + self.green)
    def _lowerCamelCase ( self : List[str] ):
        '''GBNDVI — Green-Blue Normalized Difference Vegetation Index.'''
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def _lowerCamelCase ( self : List[str] ):
        '''GRNDVI — Green-Red Normalized Difference Vegetation Index.'''
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def _lowerCamelCase ( self : List[str] ):
        '''RBNDVI — Red-Blue Normalized Difference Vegetation Index.'''
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def _lowerCamelCase ( self : int ):
        '''PNDVI — Pan Normalized Difference Vegetation Index.'''
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def _lowerCamelCase ( self : List[str] , a : Union[str, Any]=0.0_8 , a : str=1.2_2 , a : Any=0.0_3 ):
        '''ATSAVI — Adjusted Transformed Soil-Adjusted Vegetation Index (defaults X=0.08, a=1.22, b=0.03).'''
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''BWDRVI — Blue Wide Dynamic Range Vegetation Index.'''
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def _lowerCamelCase ( self : int ):
        '''CIgreen — Chlorophyll Index (green band).'''
        return (self.nir / self.green) - 1
    def _lowerCamelCase ( self : Dict ):
        '''CIrededge — Chlorophyll Index (red-edge band).'''
        return (self.nir / self.redEdge) - 1
    def _lowerCamelCase ( self : List[str] ):
        '''CI — Coloration Index.'''
        return (self.red - self.blue) / self.red
    def _lowerCamelCase ( self : str ):
        '''CTVI — Corrected Transformed Vegetation Index.'''
        lowerCAmelCase__ : Any = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def _lowerCamelCase ( self : Any ):
        '''GDVI — Green Difference Vegetation Index.'''
        return self.nir - self.green
    def _lowerCamelCase ( self : int ):
        '''EVI — Enhanced Vegetation Index.'''
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def _lowerCamelCase ( self : int ):
        '''GEMI — Global Environment Monitoring Index.'''
        lowerCAmelCase__ : Optional[int] = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
    def _lowerCamelCase ( self : Dict , a : int=0.1_6 ):
        '''GOSAVI — Green Optimized Soil-Adjusted Vegetation Index (default Y=0.16).'''
        return (self.nir - self.green) / (self.nir + self.green + y)
    def _lowerCamelCase ( self : int , a : List[str]=0.5 ):
        '''GSAVI — Green Soil-Adjusted Vegetation Index (default N=0.5).'''
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def _lowerCamelCase ( self : str ):
        '''Hue of the RGB bands.'''
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 3_0.5) * (self.green - self.blue) )
    def _lowerCamelCase ( self : Dict , a : int=None , a : Optional[int]=None ):
        '''IVI — Ideal Vegetation Index (parameters a, b).'''
        return (self.nir - b) / (a * self.red)
    def _lowerCamelCase ( self : Tuple ):
        '''IPVI — Infrared Percentage Vegetation Index.'''
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def _lowerCamelCase ( self : Optional[Any] ):
        '''I — Intensity of the RGB bands.'''
        return (self.red + self.green + self.blue) / 3_0.5
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''RVI — Ratio Vegetation Index.'''
        return self.nir / self.red
    def _lowerCamelCase ( self : str ):
        '''MRVI — Modified (normalized) Ratio Vegetation Index.'''
        return (self.rvi() - 1) / (self.rvi() + 1)
    def _lowerCamelCase ( self : List[Any] ):
        '''MSAVI — Modified Soil-Adjusted Vegetation Index.'''
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def _lowerCamelCase ( self : int ):
        '''NormG — Normalized green band.'''
        return self.green / (self.nir + self.red + self.green)
    def _lowerCamelCase ( self : Optional[Any] ):
        '''NormNIR — Normalized NIR band.'''
        return self.nir / (self.nir + self.red + self.green)
    def _lowerCamelCase ( self : str ):
        '''NormR — Normalized red band.'''
        return self.red / (self.nir + self.red + self.green)
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''NGRDI — Normalized Green-Red Difference Index.'''
        return (self.green - self.red) / (self.green + self.red)
    def _lowerCamelCase ( self : Optional[int] ):
        '''RI — Redness Index.'''
        return (self.red - self.green) / (self.red + self.green)
    def _lowerCamelCase ( self : List[str] ):
        '''S — Saturation of the RGB bands.'''
        lowerCAmelCase__ : int = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        lowerCAmelCase__ : List[Any] = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _lowerCamelCase ( self : Dict ):
        '''IF — shape index from the RGB bands.'''
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def _lowerCamelCase ( self : Optional[Any] ):
        '''DVI slot — NOTE(review): computes NIR/red (a ratio), although DVI is conventionally NIR - red; confirm.'''
        return self.nir / self.red
    def _lowerCamelCase ( self : List[str] ):
        '''TVI — Transformed Vegetation Index.'''
        return (self.ndvi() + 0.5) ** (1 / 2)
    def _lowerCamelCase ( self : Tuple ):
        '''NDRE — Normalized Difference Red Edge index.'''
        return (self.nir - self.redEdge) / (self.nir + self.redEdge)
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
PATH_TO_TRANSFORMERS = "src/transformers"

# This is to make sure the transformers module imported is the one in the repo.
# NOTE: all module constants below were previously bound to a single throwaway
# name while the rest of the file reads these names, causing NameErrors.
transformers = direct_transformers_import(PATH_TO_TRANSFORMERS)

CONFIG_MAPPING = transformers.models.auto.configuration_auto.CONFIG_MAPPING

# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
_re_checkpoint = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")

# Config classes whose docstrings are allowed to lack a checkpoint example.
CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK = {
    """DecisionTransformerConfig""",
    """EncoderDecoderConfig""",
    """MusicgenConfig""",
    """RagConfig""",
    """SpeechEncoderDecoderConfig""",
    """TimmBackboneConfig""",
    """VisionEncoderDecoderConfig""",
    """VisionTextDualEncoderConfig""",
    """LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name in `config_class`'s docstring whose
    markdown link matches `https://huggingface.co/<name>`, or None.

    Fixes NameErrors from machine mangling: the function is called as
    `get_checkpoint_from_config_class` elsewhere in this file, and the locals
    `checkpoints`/`checkpoint`/`ckpt_link`/`ckpt_link_from_name` were read but
    never bound.
    """
    checkpoint = None

    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)

    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/' ):
            ckpt_link = ckpt_link[:-1]

        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break

    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing every non-deprecated config class whose docstring
    contains no valid checkpoint link (unless explicitly ignored).

    Fixes NameErrors from machine mangling: this name is called from the
    `__main__` guard, and `configs_without_checkpoint`/`checkpoint`/`name`/
    `message` were read but never bound.
    """
    configs_without_checkpoint = []

    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)

        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)

    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint) )
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
# Script entry point: raises if any config class docstring lacks a valid checkpoint link.
if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
import json
from typing import List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_bart import BartTokenizer
# NOTE: these module constants were previously all bound to one throwaway name,
# while the tokenizer class below reads VOCAB_FILES_NAMES,
# PRETRAINED_VOCAB_FILES_MAP, PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES and logger.
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}

# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
    """vocab_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/vocab.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/vocab.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json""",
    },
    """merges_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/merges.txt""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/merges.txt""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt""",
    },
    """tokenizer_file""": {
        """facebook/bart-base""": """https://huggingface.co/facebook/bart-base/resolve/main/tokenizer.json""",
        """facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/tokenizer.json""",
        """facebook/bart-large-mnli""": """https://huggingface.co/facebook/bart-large-mnli/resolve/main/tokenizer.json""",
        """facebook/bart-large-cnn""": """https://huggingface.co/facebook/bart-large-cnn/resolve/main/tokenizer.json""",
        """facebook/bart-large-xsum""": """https://huggingface.co/facebook/bart-large-xsum/resolve/main/tokenizer.json""",
        """yjernite/bart_eli5""": """https://huggingface.co/yjernite/bart_eli5/resolve/main/tokenizer.json""",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    """facebook/bart-base""": 1024,
    """facebook/bart-large""": 1024,
    """facebook/bart-large-mnli""": 1024,
    """facebook/bart-large-cnn""": 1024,
    """facebook/bart-large-xsum""": 1024,
    """yjernite/bart_eli5""": 1024,
}
class A__ ( PreTrainedTokenizerFast ):
    """Fast (tokenizers-backed) byte-level-BPE BART tokenizer.

    NOTE(review): this block was machine-mangled — duplicate ``a`` parameters made
    ``__init__`` a SyntaxError, every method was renamed to the same placeholder,
    and ``@mask_token.setter`` referenced a property that no longer existed. Names
    are restored from evidence in the bodies themselves (the keyword names passed
    to ``super().__init__``, the ``super()._batch_encode_plus`` / ``_encode_plus``
    calls, the ``mask_token`` setter decorator) and the PreTrainedTokenizerFast API.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    slow_tokenizer_class = BartTokenizer

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        trim_offsets=True,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            merges_file,
            tokenizer_file=tokenizer_file,
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            sep_token=sep_token,
            cls_token=cls_token,
            unk_token=unk_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            trim_offsets=trim_offsets,
            **kwargs,
        )

        # Rebuild the backend pre-tokenizer if its add_prefix_space flag disagrees.
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop('type' ) )
            pre_tok_state['add_prefix_space'] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )

        self.add_prefix_space = add_prefix_space

        # the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
        tokenizer_component = 'post_processor'
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )

            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state['sep'] = tuple(state['sep'] )
            if "cls" in state:
                state['cls'] = tuple(state['cls'] )

            changes_to_apply = False

            if state.get('add_prefix_space' , add_prefix_space ) != add_prefix_space:
                state['add_prefix_space'] = add_prefix_space
                changes_to_apply = True

            if state.get('trim_offsets' , trim_offsets ) != trim_offsets:
                state['trim_offsets'] = trim_offsets
                changes_to_apply = True

            if changes_to_apply:
                component_class = getattr(processors , state.pop('type' ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )

    @property
    def mask_token(self ):
        """Mask token as a string; logs an error and returns None when unset."""
        if self._mask_token is None:
            if self.verbose:
                logger.error('Using mask_token, but it is not set yet.' )
            return None
        return str(self._mask_token )

    @mask_token.setter
    def mask_token(self , value ):
        # Wrap plain strings so the mask token keeps the space before it
        # (lstrip=True, rstrip=False — TODO confirm against upstream defaults).
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value

    def _batch_encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._batch_encode_plus(*args , **kwargs )

    def _encode_plus(self , *args , **kwargs ):
        is_split_into_words = kwargs.get('is_split_into_words' , False )
        if is_split_into_words and not self.add_prefix_space:
            raise ValueError(
                f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
                'to use it with pretokenized inputs.' )
        return super()._encode_plus(*args , **kwargs )

    def save_vocabulary(self , save_directory: str , filename_prefix: Optional[str] = None ):
        """Save the backend tokenizer model files; returns the written paths."""
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )

    def build_inputs_with_special_tokens(self , token_ids_a , token_ids_a_pair=None ):
        """Wrap one or two sequences with BOS/EOS special tokens."""
        output = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
        if token_ids_a_pair is None:
            return output
        return output + [self.eos_token_id] + token_ids_a_pair + [self.eos_token_id]

    def create_token_type_ids_from_sequences(self , token_ids_a: List[int] , token_ids_a_pair: Optional[List[int]] = None ):
        """BART does not use token type ids; return all zeros of the right length."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]

        if token_ids_a_pair is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep + sep + token_ids_a_pair + sep ) * [0]
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import structure: module name -> public names exported from it.
# NOTE: previously the dict, the torch-only list and the _LazyModule instance were
# all bound to throwaway names, so `_import_structure` (read below) was undefined
# and the lazy module was never installed in sys.modules despite `import sys`.
_import_structure = {
    """configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
    """tokenization_luke""": ["""LukeTokenizer"""],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_luke"""] = [
        """LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """LukeForEntityClassification""",
        """LukeForEntityPairClassification""",
        """LukeForEntitySpanClassification""",
        """LukeForMultipleChoice""",
        """LukeForQuestionAnswering""",
        """LukeForSequenceClassification""",
        """LukeForTokenClassification""",
        """LukeForMaskedLM""",
        """LukeModel""",
        """LukePreTrainedModel""",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow  # see https://github.com/huggingface/transformers/issues/11457
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for the moussaKam/mbarthez BARThez tokenizer.

    NOTE(review): this block was machine-mangled — class attributes all shared one
    name, the base class placeholder was undefined, and several locals were read
    through the undefined name ``a``. Names are restored from the imported
    ``TokenizerTesterMixin`` API and the values the bodies themselves use.
    """

    tokenizer_class = BarthezTokenizer
    rust_tokenizer_class = BarthezTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
        tokenizer.save_pretrained(self.tmpdirname )
        tokenizer.save_pretrained(self.tmpdirname , legacy_format=False )
        self.tokenizer = tokenizer

    def test_convert_token_and_id(self):
        """`<pad>` must round-trip through token<->id conversion as id 1."""
        token = '<pad>'
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , '<s>' )
        self.assertEqual(vocab_keys[1] , '<pad>' )
        self.assertEqual(vocab_keys[-1] , '<mask>' )
        self.assertEqual(len(vocab_keys ) , 101_122 )

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size , 101_122 )

    @require_torch
    def test_prepare_batch(self):
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        expected_src_tokens = [0, 57, 3_018, 70_307, 91, 2]

        batch = self.tokenizer(
            src_text , max_length=len(expected_src_tokens ) , padding=True , truncation=True , return_tensors='pt' )
        self.assertIsInstance(batch , BatchEncoding )

        self.assertEqual((2, 6) , batch.input_ids.shape )
        self.assertEqual((2, 6) , batch.attention_mask.shape )
        result = batch.input_ids.tolist()[0]
        self.assertListEqual(expected_src_tokens , result )

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = 'I was born in 92000, and this is falsé.'

        tokens = tokenizer.tokenize(sequence )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

    @slow
    def test_tokenizer_integration(self):
        expected_encoding = {'input_ids': [[0, 490, 14_328, 4_507, 354, 47, 43_669, 95, 25, 78_117, 20_215, 19_779, 190, 22, 400, 4, 35_343, 80_310, 603, 86, 24_937, 105, 33_438, 94_762, 196, 39_642, 7, 15, 15_933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10_534, 87, 25, 66, 3_358, 196, 55_289, 8, 82_961, 81, 2_204, 75_203, 7, 15, 763, 12_956, 216, 178, 14_328, 9_595, 1_377, 69_693, 7, 448, 71_021, 196, 18_106, 1_437, 13_974, 108, 9_083, 4, 49_315, 7, 39, 86, 1_326, 2_793, 46_333, 4, 448, 196, 74_588, 7, 49_315, 7, 39, 21, 822, 38_470, 74, 21, 66_723, 62_480, 8, 22_050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # moussaKam/mbarthez is a french model. So we also use french texts.
        sequences = [
            'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
            'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
            'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
            'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
            'telles que la traduction et la synthèse de texte.',
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=sequences , )
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
# Lazy-import structure: module name -> public names exported from it.
# NOTE: previously the dict, the vision/torch-only lists and the _LazyModule
# instance were all bound to throwaway names, so `_import_structure` (read below)
# was undefined and the lazy module was never installed despite `import sys`.
_import_structure = {
    """configuration_chinese_clip""": [
        """CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
        """ChineseCLIPConfig""",
        """ChineseCLIPOnnxConfig""",
        """ChineseCLIPTextConfig""",
        """ChineseCLIPVisionConfig""",
    ],
    """processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}

try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    # Both names are re-exported from the feature-extraction module below;
    # key split mirrors the usual transformers layout — TODO confirm module names.
    _import_structure["""feature_extraction_chinese_clip"""] = ["""ChineseCLIPFeatureExtractor"""]
    _import_structure["""image_processing_chinese_clip"""] = ["""ChineseCLIPImageProcessor"""]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["""modeling_chinese_clip"""] = [
        """CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
        """ChineseCLIPModel""",
        """ChineseCLIPPreTrainedModel""",
        """ChineseCLIPTextModel""",
        """ChineseCLIPVisionModel""",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
logger = logging.get_logger(__name__)

# Checkpoint -> config URL map (name follows the transformers convention for
# pretrained-config archive maps; previously bound to a throwaway name).
LAYOUTLMV3_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    """microsoft/layoutlmv3-base""": """https://huggingface.co/microsoft/layoutlmv3-base/resolve/main/config.json""",
}
class A__ ( PretrainedConfig ):
    """Configuration for LayoutLMv3-style models (text + 2D layout + vision).

    NOTE(review): this block was machine-mangled — every ``__init__`` parameter
    was named ``a`` (a SyntaxError) and attribute assignments were bound to a
    throwaway name. Parameter names are restored from the keyword names the body
    itself passes to ``super().__init__`` and from the right-hand sides of the
    attribute assignments (``max_ad_position_embeddings`` etc. look like mangled
    ``*_2d_*`` names — kept as the source spells them).
    """

    model_type = 'layoutlmv3'

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.0_2,
        layer_norm_eps=1E-5,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        max_ad_position_embeddings=1_024,
        coordinate_size=128,
        shape_size=128,
        has_relative_attention_bias=True,
        rel_pos_bins=32,
        max_rel_pos=128,
        rel_ad_pos_bins=64,
        max_rel_ad_pos=256,
        has_spatial_attention_bias=True,
        text_embed=True,
        visual_embed=True,
        input_size=224,
        num_channels=3,
        patch_size=16,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(
            vocab_size=vocab_size,
            hidden_size=hidden_size,
            num_hidden_layers=num_hidden_layers,
            num_attention_heads=num_attention_heads,
            intermediate_size=intermediate_size,
            hidden_act=hidden_act,
            hidden_dropout_prob=hidden_dropout_prob,
            attention_probs_dropout_prob=attention_probs_dropout_prob,
            max_position_embeddings=max_position_embeddings,
            type_vocab_size=type_vocab_size,
            initializer_range=initializer_range,
            layer_norm_eps=layer_norm_eps,
            pad_token_id=pad_token_id,
            bos_token_id=bos_token_id,
            eos_token_id=eos_token_id,
            **kwargs,
        )
        self.max_ad_position_embeddings = max_ad_position_embeddings
        self.coordinate_size = coordinate_size
        self.shape_size = shape_size
        self.has_relative_attention_bias = has_relative_attention_bias
        self.rel_pos_bins = rel_pos_bins
        self.max_rel_pos = max_rel_pos
        self.has_spatial_attention_bias = has_spatial_attention_bias
        self.rel_ad_pos_bins = rel_ad_pos_bins
        self.max_rel_ad_pos = max_rel_ad_pos
        self.text_embed = text_embed
        self.visual_embed = visual_embed
        self.input_size = input_size
        self.num_channels = num_channels
        self.patch_size = patch_size
        self.classifier_dropout = classifier_dropout
class A__ ( OnnxConfig ):
    """ONNX export configuration for LayoutLMv3-style models.

    NOTE(review): this block was machine-mangled — `generate_dummy_inputs` had
    duplicate ``a`` parameters (a SyntaxError) and several locals were bound to
    throwaway names. Names restored from the OnnxConfig API and the reads inside
    the bodies (``batch_size``/``seq_length`` are used by name in the source).
    """

    torch_onnx_minimum_version = version.parse('1.12' )

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        """Named dynamic axes for each model input, task-dependent."""
        if self.task in ["question-answering", "sequence-classification"]:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
                ] )
        else:
            return OrderedDict(
                [
                    ('input_ids', {0: 'batch', 1: 'sequence'}),
                    ('bbox', {0: 'batch', 1: 'sequence'}),
                    ('attention_mask', {0: 'batch', 1: 'sequence'}),
                    ('pixel_values', {0: 'batch', 1: 'num_channels'}),
                ] )

    @property
    def atol_for_validation(self) -> float:
        """Absolute tolerance when validating the exported model's outputs."""
        return 1E-5

    @property
    def default_onnx_opset(self) -> int:
        return 12

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional["TensorType"] = None,
        num_channels: int = 3,
        image_width: int = 40,
        image_height: int = 40,
    ):
        """Build dummy (text, bbox, image) inputs for tracing the export."""
        # OCR must be off: we feed the words/boxes ourselves.
        setattr(processor.image_processor , 'apply_ocr' , False )

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        batch_size = compute_effective_axis_dimension(
            batch_size , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
        # If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
        token_to_add = processor.tokenizer.num_special_tokens_to_add(is_pair )
        seq_length = compute_effective_axis_dimension(
            seq_length , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=token_to_add )

        # Generate dummy inputs according to compute batch and sequence
        dummy_text = [[' '.join([processor.tokenizer.unk_token] ) * seq_length]] * batch_size

        # Generate dummy bounding boxes
        dummy_bboxes = [[[48, 84, 73, 128]]] * batch_size

        # If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
        # batch_size = compute_effective_axis_dimension(batch_size, fixed_dimension=OnnxConfig.default_fixed_batch)
        dummy_image = self._generate_dummy_images(batch_size , num_channels , image_height , image_width )
        inputs = dict(
            processor(
                dummy_image , text=dummy_text , boxes=dummy_bboxes , return_tensors=framework , ) )

        return inputs
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module logger and inference device; previously both were bound to a throwaway
# name while the functions below read `logger` and `device`.
logger = logging.getLogger(__name__)
torch.set_grad_enabled(False)
device = """cuda""" if torch.cuda.is_available() else """cpu"""
def split_text(text, n=100, character=" ") -> List[str]:
    """Split `text` into chunks of `n` tokens separated by `character`.

    Fixes a SyntaxError: all three parameters shared the name
    `SCREAMING_SNAKE_CASE_` while the body reads `text`, `n` and `character`.
    """
    text = text.split(character )
    return [character.join(text[i : i + n] ).strip() for i in range(0 , len(text ) , n )]
def split_documents(documents: dict) -> dict:
    """Split every document into 100-word passages, keeping title/text parallel.

    Fixes NameErrors: the two accumulators were bound to throwaway names while
    the loop appends to `titles`/`texts`, and the passage/text arguments were
    mangled to an undefined placeholder.
    """
    titles, texts = [], []
    for title, text in zip(documents['title'] , documents['text'] ):
        if text is not None:
            for passage in split_text(text ):
                titles.append(title if title is not None else '' )
                texts.append(passage )
    return {"title": titles, "text": texts}
def embed(documents: dict, ctx_encoder: DPRContextEncoder, ctx_tokenizer: DPRContextEncoderTokenizerFast) -> dict:
    """Compute DPR embeddings for each (title, text) pair, returned as numpy.

    Fixes a SyntaxError: all three parameters shared one name; names restored
    from the reads inside the body (`documents`, `ctx_tokenizer`, `ctx_encoder`).
    """
    input_ids = ctx_tokenizer(
        documents['title'] , documents['text'] , truncation=True , padding='longest' , return_tensors='pt' )['input_ids']
    # Move ids to the module-level `device`; pooler_output is the passage embedding.
    embeddings = ctx_encoder(input_ids.to(device=device ) , return_dict=True ).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def main(rag_example_args, processing_args, index_hnsw_args) -> None:
    """Build a RAG knowledge dataset from a TSV file and index it with FAISS.

    Fixes mangling: this function is called as `main(...)` under the
    `__main__` guard, the parameter names are read verbatim inside the body,
    and every local was bound to a throwaway name while later lines read
    `dataset`/`ctx_encoder`/`ctx_tokenizer`/paths by name.
    """
    ######################################
    logger.info('Step 1 - Create the dataset' )
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents , batched=True , num_proc=processing_args.num_proc )

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=device )
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
    new_features = Features(
        {'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed , ctx_encoder=ctx_encoder , ctx_tokenizer=ctx_tokenizer ) , batched=True , batch_size=processing_args.batch_size , features=new_features , )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
    dataset.save_to_disk(passages_path )
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('Step 2 - Index the dataset' )
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
    dataset.add_faiss_index('embeddings' , custom_index=index )

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
    dataset.get_index('embeddings' ).save(index_path )
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """CLI arguments for the knowledge-dataset example.

    NOTE(review): class and field names restored — the three dataclasses all
    shadowed one name while `HfArgumentParser((RagExampleArguments, ...))` at the
    bottom of the file reads these names; `__file__` was mangled to an undefined
    placeholder.
    """

    csv_path: str = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class ProcessingArguments:
    """CLI arguments controlling passage splitting and embedding batching.

    NOTE(review): name restored from the `HfArgumentParser` call at the bottom of
    the file; the mangled `__magic_name__` default is restored to None.
    """

    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class IndexHnswArguments:
    """CLI arguments for the FAISS HNSW index (read as `index_hnsw_args.d`/`.m`).

    NOTE(review): class and field names restored from the `HfArgumentParser` call
    and the `faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, ...)` read
    in `main`.
    """

    d: int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    m: int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)

    # Parse the three dataclass argument groups and run `main`.
    # NOTE: previously the parser and parsed tuple were bound to throwaway names
    # while the following lines read `rag_example_args` etc., and the output-dir
    # override discarded its result instead of assigning the attribute.
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
import unittest
from transformers import DonutProcessor
# Checkpoint id used by the processor tests below (previously bound to a throwaway name).
DONUT_PRETRAINED_MODEL_NAME = """naver-clova-ix/donut-base"""
class A__ ( unittest.TestCase ):
    """Tests DonutProcessor's token-sequence -> JSON conversion.

    NOTE(review): `setUp` referenced an undefined name `a` and the result of
    `from_pretrained` was bound to a throwaway name, while the test reads
    `self.processor`. The checkpoint id is inlined so this block stands on its
    own; `tokenajson` looks like a digit-mangled `token2json` (the DonutProcessor
    API method) and is restored.
    """

    def setUp(self):
        self.processor = DonutProcessor.from_pretrained("naver-clova-ix/donut-base" )

    def test_token2json(self):
        """An XML-ish tag sequence must parse back into the expected dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }

        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )

        actual_json = self.processor.token2json(sequence )

        self.assertDictEqual(actual_json , expected_json )
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : str , **a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter
lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1
lowerCAmelCase__ : Tuple = samplea.shape[0]
lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a )
lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCAmelCase__ : str = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Dict = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Any = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : Optional[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : List[str] = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : List[str] = self.dummy_sample_deter
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : List[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : str = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a )
lowerCAmelCase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(a ):
if i == len(a ) - 1:
lowerCAmelCase__ : Tuple = -1
else:
lowerCAmelCase__ : Dict = timesteps[i + 1]
lowerCAmelCase__ : str = scheduler.previous_timestep(a )
lowerCAmelCase__ : int = prev_t.item()
self.assertEqual(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 1, 0]
lowerCAmelCase__ : int = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a ) | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Any = abs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = 0
while n > 0:
res += n % 10
n //= 10
return res
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : List[str] = abs(SCREAMING_SNAKE_CASE_ )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
return sum(int(SCREAMING_SNAKE_CASE_ ) for c in str(abs(SCREAMING_SNAKE_CASE_ ) ) )
def lowerCAmelCase__ ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> None:
lowerCAmelCase__ : List[str] = F'''{func.__name__}({value})'''
lowerCAmelCase__ : int = timeit(F'''__main__.{call}''' , setup='import __main__' )
print(F'''{call:56} = {func(SCREAMING_SNAKE_CASE_ )} -- {timing:.4f} seconds''' )
for value in (262_144, 1_125_899_906_842_624, 1_267_650_600_228_229_401_496_703_205_376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark() | 69 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv3ImageProcessor'
lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Optional[int] , a : Union[str, Any]=None , a : Optional[Any]=None , **a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowerCAmelCase__ : int = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self : List[Any] , a : List[Any] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : str , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
lowerCAmelCase__ : List[str] = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ : List[str] = features['words']
lowerCAmelCase__ : List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
lowerCAmelCase__ : Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase__ : List[str] = images
return encoded_inputs
def _lowerCamelCase ( self : Any , a : List[str] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(a )} and {len(a )}''' )
return images_with_overflow
def _lowerCamelCase ( self : Union[str, Any] , *a : Optional[Any] , **a : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : Tuple , *a : List[str] , **a : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor | 69 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowerCamelCase__ = {
"""configuration_roc_bert""": ["""ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """RoCBertConfig"""],
"""tokenization_roc_bert""": ["""RoCBertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
pass
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RoCBertForCausalLM""",
"""RoCBertForMaskedLM""",
"""RoCBertForMultipleChoice""",
"""RoCBertForPreTraining""",
"""RoCBertForQuestionAnswering""",
"""RoCBertForSequenceClassification""",
"""RoCBertForTokenClassification""",
"""RoCBertLayer""",
"""RoCBertModel""",
"""RoCBertPreTrainedModel""",
"""load_tf_weights_in_roc_bert""",
]
if TYPE_CHECKING:
from .configuration_roc_bert import ROC_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, RoCBertConfig
from .tokenization_roc_bert import RoCBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
raise OptionalDependencyNotAvailable()
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roc_bert import (
ROC_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
RoCBertForCausalLM,
RoCBertForMaskedLM,
RoCBertForMultipleChoice,
RoCBertForPreTraining,
RoCBertForQuestionAnswering,
RoCBertForSequenceClassification,
RoCBertForTokenClassification,
RoCBertLayer,
RoCBertModel,
RoCBertPreTrainedModel,
load_tf_weights_in_roc_bert,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __magic_name__ ):
def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Dict = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Any = q_groups
lowerCAmelCase__ : Any = k_groups
lowerCAmelCase__ : Union[str, Any] = v_groups
lowerCAmelCase__ : int = post_attention_groups
lowerCAmelCase__ : str = intermediate_groups
lowerCAmelCase__ : Union[str, Any] = output_groups
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Any , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowerCAmelCase__ : Any = model(a )[0]
lowerCAmelCase__ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(a , a , atol=1E-4 ) ) | 69 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
lowerCamelCase__ = 16
lowerCamelCase__ = 32
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 16 , SCREAMING_SNAKE_CASE_ = "bert-base-cased" ) -> Optional[int]:
lowerCAmelCase__ : int = AutoTokenizer.from_pretrained(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = load_dataset('glue' , 'mrpc' )
def tokenize_function(SCREAMING_SNAKE_CASE_ ):
# max_length=None => use the model max length (it's actually the default)
lowerCAmelCase__ : int = tokenizer(examples['sentence1'] , examples['sentence2'] , truncation=SCREAMING_SNAKE_CASE_ , max_length=SCREAMING_SNAKE_CASE_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
lowerCAmelCase__ : Optional[Any] = datasets.map(
SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , remove_columns=['idx', 'sentence1', 'sentence2'] , load_from_cache_file=SCREAMING_SNAKE_CASE_ )
# We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
# transformers library
lowerCAmelCase__ : Dict = tokenized_datasets.rename_column('label' , 'labels' )
def collate_fn(SCREAMING_SNAKE_CASE_ ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='max_length' , max_length=128 , return_tensors='pt' )
return tokenizer.pad(SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )
# Instantiate dataloaders.
lowerCAmelCase__ : Union[str, Any] = DataLoader(
tokenized_datasets['train'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = DataLoader(
tokenized_datasets['validation'] , shuffle=SCREAMING_SNAKE_CASE_ , collate_fn=SCREAMING_SNAKE_CASE_ , batch_size=SCREAMING_SNAKE_CASE_ )
return train_dataloader, eval_dataloader
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Dict:
model.eval()
lowerCAmelCase__ : int = 0
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
lowerCAmelCase__ : int = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = outputs.logits.argmax(dim=-1 )
# It is slightly faster to call this once, than multiple times
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = accelerator.gather(
(predictions, batch['labels']) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(SCREAMING_SNAKE_CASE_ ) - 1:
lowerCAmelCase__ : Optional[int] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
lowerCAmelCase__ : Optional[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=SCREAMING_SNAKE_CASE_ , references=SCREAMING_SNAKE_CASE_ , )
lowerCAmelCase__ : List[str] = metric.compute()
return eval_metric["accuracy"]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# Initialize accelerator
lowerCAmelCase__ : int = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
lowerCAmelCase__ : List[str] = config['lr']
lowerCAmelCase__ : str = int(config['num_epochs'] )
lowerCAmelCase__ : List[Any] = int(config['seed'] )
lowerCAmelCase__ : Tuple = int(config['batch_size'] )
lowerCAmelCase__ : List[str] = args.model_name_or_path
set_seed(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = get_dataloaders(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
lowerCAmelCase__ : Dict = AutoModelForSequenceClassification.from_pretrained(SCREAMING_SNAKE_CASE_ , return_dict=SCREAMING_SNAKE_CASE_ )
# Instantiate optimizer
lowerCAmelCase__ : Tuple = (
AdamW
if accelerator.state.deepspeed_plugin is None
or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
lowerCAmelCase__ : Dict = optimizer_cls(params=model.parameters() , lr=SCREAMING_SNAKE_CASE_ )
if accelerator.state.deepspeed_plugin is not None:
lowerCAmelCase__ : List[str] = accelerator.state.deepspeed_plugin.deepspeed_config[
'gradient_accumulation_steps'
]
else:
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Tuple = (len(SCREAMING_SNAKE_CASE_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
lowerCAmelCase__ : Union[str, Any] = get_linear_schedule_with_warmup(
optimizer=SCREAMING_SNAKE_CASE_ , num_warmup_steps=0 , num_training_steps=SCREAMING_SNAKE_CASE_ , )
else:
lowerCAmelCase__ : Optional[int] = DummyScheduler(SCREAMING_SNAKE_CASE_ , total_num_steps=SCREAMING_SNAKE_CASE_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = accelerator.prepare(
SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# We need to keep track of how many total steps we have iterated over
lowerCAmelCase__ : Dict = 0
# We also need to keep track of the stating epoch so files are named properly
lowerCAmelCase__ : Union[str, Any] = 0
lowerCAmelCase__ : Tuple = evaluate.load('glue' , 'mrpc' )
lowerCAmelCase__ : Any = num_epochs
if args.partial_train_epoch is not None:
lowerCAmelCase__ : Any = args.partial_train_epoch
if args.resume_from_checkpoint:
accelerator.load_state(args.resume_from_checkpoint )
lowerCAmelCase__ : Optional[int] = args.resume_from_checkpoint.split('epoch_' )[1]
lowerCAmelCase__ : Dict = ''
for char in epoch_string:
if char.isdigit():
state_epoch_num += char
else:
break
lowerCAmelCase__ : Optional[Any] = int(SCREAMING_SNAKE_CASE_ ) + 1
lowerCAmelCase__ : Union[str, Any] = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
accelerator.print('resumed checkpoint performance:' , SCREAMING_SNAKE_CASE_ )
accelerator.print('resumed checkpoint\'s scheduler\'s lr:' , lr_scheduler.get_lr()[0] )
accelerator.print('resumed optimizers\'s lr:' , optimizer.param_groups[0]['lr'] )
with open(os.path.join(args.output_dir , F'''state_{starting_epoch-1}.json''' ) , 'r' ) as f:
lowerCAmelCase__ : Any = json.load(SCREAMING_SNAKE_CASE_ )
assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
assert (
resumed_state["lr"] == lr_scheduler.get_lr()[0]
), "Scheduler learning rate mismatch, loading from checkpoint failed"
assert (
resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
), "Optimizer learning rate mismatch, loading from checkpoint failed"
assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
return
# Now we train the model
lowerCAmelCase__ : int = {}
for epoch in range(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
model.train()
for step, batch in enumerate(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Any = model(**SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = outputs.loss
lowerCAmelCase__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(SCREAMING_SNAKE_CASE_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
lowerCAmelCase__ : int = F'''epoch_{epoch}'''
lowerCAmelCase__ : Optional[int] = os.path.join(args.output_dir , SCREAMING_SNAKE_CASE_ )
accelerator.save_state(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = evaluation_loop(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = accuracy
lowerCAmelCase__ : Optional[int] = lr_scheduler.get_lr()[0]
lowerCAmelCase__ : Optional[int] = optimizer.param_groups[0]['lr']
lowerCAmelCase__ : Tuple = epoch
lowerCAmelCase__ : Any = overall_step
accelerator.print(F'''epoch {epoch}:''' , SCREAMING_SNAKE_CASE_ )
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , F'''state_{epoch}.json''' ) , 'w' ) as f:
json.dump(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( ) -> int:
    """Parse command-line options and launch ``training_function``.

    Fixes relative to the reviewed version: every local was assigned to one
    mangled name but read back as ``parser``/``args``/``config``, and the
    ``type=``/``default=``/``required=`` options referenced an undefined
    module-level name; concrete values consistent with each option's help
    string are restored.
    """
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.')
    parser.add_argument(
        '--model_name_or_path',
        type=str,
        default='bert-base-cased',
        help='Path to pretrained model or model identifier from huggingface.co/models.',
        required=False,
    )
    parser.add_argument(
        '--output_dir',
        type=str,
        default='.',
        help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.',
    )
    parser.add_argument(
        '--resume_from_checkpoint',
        type=str,
        default=None,
        help='If the training should continue from a checkpoint folder.',
    )
    parser.add_argument(
        '--partial_train_epoch',
        type=int,
        default=None,
        help='If passed, the training will stop after this number of epochs.',
    )
    parser.add_argument(
        '--num_epochs',
        type=int,
        default=2,
        help='Number of train epochs.',
    )
    args = parser.parse_args()
    # Hyper-parameters handed to the training loop (values taken verbatim
    # from the original config literal).
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config, args)
# Script entry point.
# NOTE(review): `main` is not defined in this chunk — the function above was
# renamed by the identifier mangler; confirm `main` exists elsewhere in the file.
if __name__ == "__main__":
    main()
# Module author attribution (originally an `__author__` constant before mangling).
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    """Evaluate a fully-parenthesised infix arithmetic expression with
    Dijkstra's two-stack algorithm.

    Fixes relative to the reviewed version: the body iterated an undefined
    name ``equation`` instead of the parameter, and every stack/operand local
    was assigned to one mangled name while being read under another, so the
    function could never run.  The classic algorithm is restored:

    RULE 1: push digits on the operand stack.
    RULE 2: push operators on the operator stack.
    RULE 4: on ')', pop one operator and two operands, apply, push the result.
    RULE 5: the final answer is the single value left on the operand stack.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in SCREAMING_SNAKE_CASE_:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            # Second operand is popped first (it was pushed last).
            num_2 = operand_stack.peek()
            operand_stack.pop()
            num_1 = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](num_1, num_2)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()
# Demo: evaluates a sample parenthesised expression.
# NOTE(review): `equation` and `dijkstras_two_stack_algorithm` are not bound in
# this chunk (the constant above was renamed `lowerCamelCase__` and the function
# `lowerCAmelCase__` by the mangler) — confirm against the original source.
if __name__ == "__main__":
    lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
# Module-level alias of a builtin so patch_submodule() tests can target it.
lowerCamelCase__ = open  # noqa: we just need to have a builtin inside this module to test it properly
import numpy
class A__ :
    """Two-hidden-layer feed-forward neural network trained with plain
    back-propagation (sigmoid activations throughout).

    NOTE(review): the signatures below declare the parameter name ``a``
    more than once (a SyntaxError in Python) and the bodies read names
    (``input_array``, ``output_array``, ``iterations``, ``give_loss``,
    ``input_arr``) that those signatures never bind; locals are likewise
    assigned to one mangled name and read under another.  The identifiers
    appear machine-mangled — restore them from the original source before
    relying on this class.
    """

    def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
        '''Store the training data and randomly initialise all three weight matrices.'''
        lowerCAmelCase__ : int = input_array
        # Random initial weights are assigned where first argument is the
        # number of nodes in previous layer and second argument is the
        # number of nodes in the next layer.
        # Random initial weights are assigned.
        # self.input_array.shape[1] is used to represent number of nodes in input layer.
        # First hidden layer consists of 4 nodes.
        lowerCAmelCase__ : Dict = numpy.random.rand(
            self.input_array.shape[1] , 4 )
        # Random initial values for the first hidden layer.
        # First hidden layer has 4 nodes.
        # Second hidden layer has 3 nodes.
        lowerCAmelCase__ : List[str] = numpy.random.rand(
            4 , 3 )
        # Random initial values for the second hidden layer.
        # Second hidden layer has 3 nodes.
        # Output layer has 1 node.
        lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
        # Real output values provided.
        lowerCAmelCase__ : str = output_array
        # Predicted output values by the neural network.
        # Predicted_output array initially consists of zeroes.
        lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )

    def _lowerCamelCase ( self : List[Any] ):
        '''Forward pass: sigmoid activations through both hidden layers to the output.'''
        lowerCAmelCase__ : str = sigmoid(
            numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
        # layer_between_first_hidden_layer_and_second_hidden_layer is the layer
        # connecting the first hidden set of nodes with the second hidden set of nodes.
        lowerCAmelCase__ : Tuple = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        # layer_between_second_hidden_layer_and_output is the layer connecting
        # second hidden layer with the output node.
        lowerCAmelCase__ : Any = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return self.layer_between_second_hidden_layer_and_output

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Back-propagate the squared-error gradient and update all three weight
        matrices in place (gradient ascent on 2*(target - prediction)).'''
        lowerCAmelCase__ : List[str] = numpy.dot(
            self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
            * (self.output_array - self.predicted_output)
            * sigmoid_derivative(self.predicted_output ) , )
        lowerCAmelCase__ : Optional[Any] = numpy.dot(
            self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
                2
                * (self.output_array - self.predicted_output)
                * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
            * sigmoid_derivative(
                self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
        lowerCAmelCase__ : int = numpy.dot(
            self.input_array.T , numpy.dot(
                numpy.dot(
                    2
                    * (self.output_array - self.predicted_output)
                    * sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
                * sigmoid_derivative(
                    self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
            * sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
        self.input_layer_and_first_hidden_layer_weights += (
            updated_input_layer_and_first_hidden_layer_weights
        )
        self.first_hidden_layer_and_second_hidden_layer_weights += (
            updated_first_hidden_layer_and_second_hidden_layer_weights
        )
        self.second_hidden_layer_and_output_layer_weights += (
            updated_second_hidden_layer_and_output_layer_weights
        )

    def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
        '''Run `iterations` rounds of forward + backward passes, optionally
        printing the mean-squared loss after each round.'''
        for iteration in range(1 , iterations + 1 ):
            lowerCAmelCase__ : Any = self.feedforward()
            self.back_propagation()
            if give_loss:
                lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
                print(f'''Iteration {iteration} Loss: {loss}''' )

    def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
        '''Run a forward pass on one input row and threshold the output at 0.6,
        returning the predicted class as 0 or 1.'''
        lowerCAmelCase__ : Dict = input_arr
        lowerCAmelCase__ : Any = sigmoid(
            numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
        lowerCAmelCase__ : int = sigmoid(
            numpy.dot(
                self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
        lowerCAmelCase__ : List[Any] = sigmoid(
            numpy.dot(
                self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
        return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
    """Element-wise logistic sigmoid: 1 / (1 + e^-x).

    Fix: the body referenced an undefined name ``value``; it now uses the
    function's parameter.
    """
    return 1 / (1 + numpy.exp(-SCREAMING_SNAKE_CASE_))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
    """Derivative of the sigmoid, expressed in terms of the sigmoid's output
    s: s * (1 - s).

    Fix: the body referenced an undefined name ``value``; it now uses the
    function's parameter.
    """
    return (SCREAMING_SNAKE_CASE_) * (1 - (SCREAMING_SNAKE_CASE_))
def lowerCAmelCase__ ( ) -> int:
    '''Build the 3-bit parity truth table, train the two-hidden-layer network
    on it, and return the prediction for input (1, 1, 1).

    NOTE(review): `numpy.floataa` is not a numpy attribute (presumably a
    mangled `float64`), `TwoHiddenLayerNeuralNetwork` is not defined in this
    chunk (the class above is named `A__`), and the locals are assigned to a
    mangled name but read back as `test_input`/`output`/`neural_network` —
    confirm all of these against the original source.
    '''
    lowerCAmelCase__ : Any = numpy.array(
        (
            [0, 0, 0],
            [0, 0, 1],
            [0, 1, 0],
            [0, 1, 1],
            [1, 0, 0],
            [1, 0, 1],
            [1, 1, 0],
            [1, 1, 1],
        ) , dtype=numpy.floataa , )
    # True output values for the given input values.
    lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
    # Calling neural network class.
    lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
        input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
    # Calling training function.
    # Set give_loss to True if you want to see loss in every iteration.
    neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
    return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
# Script entry point.
# NOTE(review): `example` is not defined in this chunk (the driver above was
# renamed `lowerCAmelCase__` by the mangler) — confirm against the original.
if __name__ == "__main__":
    example()
import random
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , probability , directed = False ) -> dict:
    """Generate a random graph on ``SCREAMING_SNAKE_CASE_`` vertices as an
    adjacency dict: each possible edge is included independently with the
    given ``probability``; when ``directed`` is False the edge is added in
    both directions.

    Fixes relative to the reviewed version: the signature declared the same
    parameter name three times (a SyntaxError), the body read the unbound
    names ``probability``/``directed``/``graph``, the edge appends pushed the
    vertex count instead of the neighbour index, and the ``probability >= 1``
    branch called an unresolved ``complete_graph`` helper (now inlined so the
    function is self-contained).
    """
    graph: dict = {i: [] for i in range(SCREAMING_SNAKE_CASE_)}
    # if probability is greater or equal than 1, then generate a complete graph
    if probability >= 1:
        return {i: [j for j in range(SCREAMING_SNAKE_CASE_) if i != j] for i in range(SCREAMING_SNAKE_CASE_)}
    # if probability is lower or equal than 0, then return a graph without edges
    if probability <= 0:
        return graph
    # for each couple of nodes, add an edge from u to v
    # if the number randomly generated is lower than probability
    for i in range(SCREAMING_SNAKE_CASE_):
        for j in range(i + 1, SCREAMING_SNAKE_CASE_):
            if random.random() < probability:
                graph[i].append(j)
                if not directed:
                    # if the graph is undirected, add an edge from j to i, either
                    graph[j].append(i)
    return graph
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
    """Build the complete graph on the given number of vertices.

    Returns an adjacency dict mapping each vertex to the list of every
    other vertex (no self-loops).
    """
    adjacency = {}
    for node in range(SCREAMING_SNAKE_CASE_):
        neighbours = [other for other in range(SCREAMING_SNAKE_CASE_) if other != node]
        adjacency[node] = neighbours
    return adjacency
# Run the module's doctests when executed directly.
if __name__ == "__main__":
    import doctest

    doctest.testmod()
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
    """Test helper that builds Swinv2 configs and dummy inputs, then checks the
    shapes returned by the base model and both task heads.

    NOTE(review): the `__init__` signature declares the parameter name `a`
    repeatedly (a SyntaxError in Python) and the bodies read names
    (`parent`, `batch_size`, ...) those signatures never bind; locals are
    assigned to a mangled name and read back under another.  The identifiers
    appear machine-mangled — restore them from the original source.
    """

    def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ):
        '''Record every hyper-parameter used to build configs and dummy inputs.'''
        lowerCAmelCase__ : str = parent
        lowerCAmelCase__ : Union[str, Any] = batch_size
        lowerCAmelCase__ : List[str] = image_size
        lowerCAmelCase__ : Optional[Any] = patch_size
        lowerCAmelCase__ : Tuple = num_channels
        lowerCAmelCase__ : Optional[int] = embed_dim
        lowerCAmelCase__ : Tuple = depths
        lowerCAmelCase__ : List[str] = num_heads
        lowerCAmelCase__ : List[Any] = window_size
        lowerCAmelCase__ : Any = mlp_ratio
        lowerCAmelCase__ : Optional[Any] = qkv_bias
        lowerCAmelCase__ : Any = hidden_dropout_prob
        lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
        lowerCAmelCase__ : int = drop_path_rate
        lowerCAmelCase__ : Optional[Any] = hidden_act
        lowerCAmelCase__ : int = use_absolute_embeddings
        lowerCAmelCase__ : List[str] = patch_norm
        lowerCAmelCase__ : Optional[int] = layer_norm_eps
        lowerCAmelCase__ : List[str] = initializer_range
        lowerCAmelCase__ : Optional[Any] = is_training
        lowerCAmelCase__ : List[Any] = scope
        lowerCAmelCase__ : Dict = use_labels
        lowerCAmelCase__ : List[Any] = type_sequence_label_size
        lowerCAmelCase__ : Optional[Any] = encoder_stride

    def _lowerCamelCase ( self : int ):
        '''Build random pixel values (and labels when enabled) plus a config.'''
        lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : Optional[Any] = None
        if self.use_labels:
            lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
        lowerCAmelCase__ : int = self.get_config()
        return config, pixel_values, labels

    def _lowerCamelCase ( self : List[str] ):
        '''Materialise a SwinvaConfig from the stored hyper-parameters.'''
        return SwinvaConfig(
            image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )

    def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ):
        '''Check the base model's last_hidden_state shape after patch merging.'''
        lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Optional[int] = model(a )
        # Sequence length shrinks by 4x per stage; width doubles per stage.
        lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
        lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )

    def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ):
        '''Check the masked-image-modeling head, including a greyscale variant.'''
        lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : str = model(a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
        # test greyscale images
        lowerCAmelCase__ : Any = 1
        lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
        lowerCAmelCase__ : List[str] = model(a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )

    def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ):
        '''Check the image-classification head's logits shape.'''
        lowerCAmelCase__ : str = self.type_sequence_label_size
        lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )

    def _lowerCamelCase ( self : int ):
        '''Adapt prepare_config_and_inputs() to the (config, inputs_dict) form.'''
        lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
        lowerCAmelCase__ : str = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Model-test suite for Swinv2: config round-trips, output shapes,
    attention/hidden-state introspection, and weight initialisation.

    NOTE(review): several method bodies below reference a bare name `a`
    that their signatures do not bind, and locals are assigned to a mangled
    name while read back under another — the identifiers appear
    machine-mangled; restore them from the original source.
    """
    lowercase = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    lowercase = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )
    lowercase = False
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowerCamelCase ( self : Tuple ):
        '''Set up the model tester and the config tester used by every test.'''
        lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
        lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )

    def _lowerCamelCase ( self : List[Any] ):
        '''Exercise the standard config serialisation/validation battery.'''
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def _lowerCamelCase ( self : List[str] ):
        '''Check base-model output shapes via the model tester.'''
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
    def _lowerCamelCase ( self : Any ):
        '''Skipped: known CUDA failure with PyTorch 2.0.0.'''
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds' )
    def _lowerCamelCase ( self : Dict ):
        '''Skipped: Swinv2 has no inputs_embeds pathway.'''
        pass

    def _lowerCamelCase ( self : Tuple ):
        '''Input embeddings should be an nn.Module; output embeddings linear or absent.'''
        lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = model_class(a )
            self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
            lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(a , nn.Linear ) )

    def _lowerCamelCase ( self : Optional[int] ):
        '''The forward signature's first argument must be pixel_values.'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Optional[int] = model_class(a )
            lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCAmelCase__ : Dict = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Validate attention outputs: count, per-head window shape, and that
        attentions stay last when hidden states are also requested.'''
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[int] = True
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Tuple = True
            lowerCAmelCase__ : str = False
            lowerCAmelCase__ : List[Any] = True
            lowerCAmelCase__ : Dict = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Dict = outputs.attentions
            lowerCAmelCase__ : Dict = len(self.model_tester.depths )
            self.assertEqual(len(a ) , a )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ : List[str] = True
            lowerCAmelCase__ : Optional[int] = config.window_size**2
            lowerCAmelCase__ : str = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Optional[Any] = outputs.attentions
            self.assertEqual(len(a ) , a )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
            lowerCAmelCase__ : Tuple = len(a )
            # Check attention is always last and order is fine
            lowerCAmelCase__ : str = True
            lowerCAmelCase__ : Union[str, Any] = True
            lowerCAmelCase__ : str = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
            if hasattr(self.model_tester , 'num_hidden_states_types' ):
                lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                lowerCAmelCase__ : Any = 2
            self.assertEqual(out_len + added_hidden_states , len(a ) )
            lowerCAmelCase__ : Dict = outputs.attentions
            self.assertEqual(len(a ) , a )
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )

    def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
        '''Shared helper: run the model and validate hidden_states and
        reshaped_hidden_states against the expected patch grid.'''
        lowerCAmelCase__ : int = model_class(a )
        model.to(a )
        model.eval()
        with torch.no_grad():
            lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
        lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
        lowerCAmelCase__ : str = getattr(
            self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
        self.assertEqual(len(a ) , a )
        # Swinv2 has a different seq_length
        lowerCAmelCase__ : int = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.assertListEqual(
            list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
        lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
        self.assertEqual(len(a ) , a )
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
        lowerCAmelCase__ : List[str] = (
            reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )

    def _lowerCamelCase ( self : Optional[Any] ):
        '''Check hidden states both via explicit kwarg and via config flag.'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : str = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(a , a , a , a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : List[str] = True
            self.check_hidden_states_output(a , a , a , a )

    def _lowerCamelCase ( self : List[str] ):
        '''Same hidden-state checks with an image size not divisible by the patch size.'''
        lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Any = 3
        lowerCAmelCase__ : int = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size , collections.abc.Iterable )
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        lowerCAmelCase__ : str = (
            config.patch_size
            if isinstance(config.patch_size , collections.abc.Iterable )
            else (config.patch_size, config.patch_size)
        )
        # Pad each spatial dim up so that the patch grid covers the image.
        lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : str = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Any = True
            self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )

    def _lowerCamelCase ( self : Dict ):
        '''Delegate the masked-image-modeling head check to the model tester.'''
        lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*a )

    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Delegate the classification head check to the model tester.'''
        lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*a )

    @slow
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Smoke-test loading the first published checkpoint.'''
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
            self.assertIsNotNone(a )

    def _lowerCamelCase ( self : Any ):
        '''With init scaled to zero, every trainable parameter (except
        embeddings and logit_scale) must be exactly 0 or 1.'''
        lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = model_class(config=a )
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
    """Integration test: run the published Swinv2 tiny classifier on a fixture
    image and compare the first logits against recorded values."""

    @cached_property
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Lazily load the image processor for the tested checkpoint.'''
        return (
            AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
            if is_vision_available()
            else None
        )

    @slow
    def _lowerCamelCase ( self : Dict ):
        '''Forward one COCO fixture image and verify logit shape and values.'''
        lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
            a )
        lowerCAmelCase__ : Dict = self.default_image_processor
        lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
        lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
        # forward pass
        with torch.no_grad():
            lowerCAmelCase__ : Union[str, Any] = model(**a )
        # verify the logits
        lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
        self.assertEqual(outputs.logits.shape , a )
        lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) )
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
# Module-level logger for this pipeline file.
lowerCamelCase__ = logging.get_logger(__name__)
@add_end_docstrings(__magic_name__ )
class A__ ( __magic_name__ ):
    """Zero-shot image classification pipeline: scores an image against
    free-form candidate labels with a CLIP-style image/text model.

    Fixes relative to the reviewed version (identifiers had been
    machine-mangled): ``__call__`` and ``preprocess`` declared the same
    parameter name twice (a SyntaxError), several bodies read names their
    signatures never bound (``kwargs``, ``inputs``, ``scores``, ...),
    ``hypothesis_template.format`` was applied to the wrong object, the
    ``isinstance`` guard in the forward step lost its ``UserDict`` target
    (``UserDict`` is imported at the top of this module), and the sort key
    lambda read an undefined name.

    NOTE(review): every method is still named ``_lowerCamelCase`` as in the
    reviewed version; the Pipeline base class normally dispatches to
    ``_sanitize_parameters``/``preprocess``/``_forward``/``postprocess`` —
    confirm the real method names against the original source.
    """

    def __init__( self : Dict , **a : Dict ):
        """Validate the vision backend and restrict to zero-shot CLIP-style models."""
        super().__init__(**a )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )

    def __call__( self : List[Any] , image : Union[str, List[str], "Image", List["Image"]] , **kwargs : Union[str, Any] ):
        """Classify ``image`` against ``candidate_labels`` (passed via kwargs)."""
        return super().__call__(image , **kwargs )

    def _lowerCamelCase ( self : Optional[int] , **kwargs : str ):
        """Split call kwargs into (preprocess, forward, postprocess) param dicts."""
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if "hypothesis_template" in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}

    def _lowerCamelCase ( self : Dict , image : Dict , candidate_labels : Optional[Any]=None , hypothesis_template : List[Any]="This is a photo of {}." ):
        """Preprocess the image and tokenize one hypothesis per candidate label."""
        image = load_image(image )
        inputs = self.image_processor(images=[image] , return_tensors=self.framework )
        inputs['candidate_labels'] = candidate_labels
        # One filled-in hypothesis sentence per candidate label.
        sequences = [hypothesis_template.format(x ) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences , return_tensors=self.framework , padding=True )
        inputs['text_inputs'] = [text_inputs]
        return inputs

    def _lowerCamelCase ( self : Optional[int] , model_inputs : Dict ):
        """Run the model and keep the per-image logits plus the labels."""
        candidate_labels = model_inputs.pop('candidate_labels' )
        text_inputs = model_inputs.pop('text_inputs' )
        if isinstance(text_inputs[0] , UserDict ):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]
        outputs = self.model(**text_inputs , **model_inputs )
        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs

    def _lowerCamelCase ( self : Dict , model_outputs : Optional[int] ):
        """Turn logits into {'score', 'label'} dicts sorted by descending score."""
        candidate_labels = model_outputs.pop('candidate_labels' )
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1 ).squeeze(-1 )
            scores = probs.tolist()
            # A single label yields a scalar; normalise to a list.
            if not isinstance(scores , list ):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits , axis=-1 )
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f'''Unsupported framework: {self.framework}''' )
        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores , candidate_labels ) , key=lambda x : -x[0] )
        ]
        return result
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Project Euler 43 predicate: given a 0-indexed sequence of the 10 digits
    of a pandigital number, report whether each 3-digit substring d2..d4,
    d3..d5, ..., d8..d10 is divisible by 2, 3, 5, 7, 11, 13, 17 respectively.

    Fixes relative to the reviewed version: the body read an undefined name
    ``num`` instead of the parameter, and the prime loop enumerated the wrong
    object instead of the [7, 11, 13, 17] list.
    """
    num = SCREAMING_SNAKE_CASE_
    # d2 d3 d4 divisible by 2 <=> d4 even.
    if num[3] % 2 != 0:
        return False
    # d3 d4 d5 divisible by 3 <=> digit sum divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4 d5 d6 divisible by 5 <=> d6 in {0, 5}.
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
    """Project Euler 43: sum every 0-9 pandigital number whose 3-digit
    substrings satisfy the prime-divisibility property.

    Fixes relative to the reviewed version: the digit-to-string join mapped
    the wrong callable (now ``str``), the permutation range ignored the
    parameter, and the divisibility predicate was an unresolved name — it is
    now a private helper so the function is self-contained.
    """

    def _is_substring_divisible(num) -> bool:
        # One-line purpose: PE-43 divisibility check on a digit tuple.
        if num[3] % 2 != 0:
            return False
        if (num[2] + num[3] + num[4]) % 3 != 0:
            return False
        if num[5] % 5 != 0:
            return False
        for i, test in enumerate((7, 11, 13, 17)):
            if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
                return False
        return True

    return sum(
        int(''.join(map(str, num)))
        for num in permutations(range(SCREAMING_SNAKE_CASE_))
        if _is_substring_divisible(num)
    )
# Script entry point: print the Euler-43 answer.
# NOTE(review): `solution` is not defined in this chunk (the function above
# was renamed `lowerCAmelCase__` by the mangler) — confirm against the original.
if __name__ == "__main__":
    print(F"""{solution() = }""")
import warnings
from ...utils import logging
from .image_processing_videomae import VideoMAEImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
    """Deprecated alias for the VideoMAE image processor: warns on construction
    and otherwise behaves exactly like VideoMAEImageProcessor.

    NOTE(review): the signature below binds the name `a` to both *args and
    **kwargs (a SyntaxError in Python), and the second positional argument to
    `warnings.warn` (the warning category, presumably FutureWarning) was
    mangled to `a` — restore both from the original source.
    """

    def __init__( self : Union[str, Any] , *a : List[str] , **a : Dict ):
        '''Emit the deprecation warning, then defer to the parent processor.'''
        warnings.warn(
            'The class VideoMAEFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
            ' Please use VideoMAEImageProcessor instead.' , a , )
        super().__init__(*a , **a )
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Make diffusers test runs deterministic across CPU/CUDA backends.
enable_full_determinism()
class ConsistencyModelPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    """Fast CPU tests for ConsistencyModelPipeline using tiny dummy checkpoints."""

    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "output_type",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet",
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            "diffusers/consistency-models-test",
            subfolder="test_unet_class_cond",
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Return the pipeline components dict (unet + scheduler).

        Args:
            class_cond: if True, use the class-conditional dummy UNet.
        """
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet

        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )

        components = {
            "unet": unet,
            "scheduler": scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Build deterministic pipeline call kwargs for `device`."""
        if str(device).startswith("mps"):
            # MPS does not support device-bound generators.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)

        inputs = {
            "batch_size": 1,
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "generator": generator,
            "output_type": "np",
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        # Single-step sampling: one inference step, no explicit timestep schedule.
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        inputs["class_labels"] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class ConsistencyModelPipelineSlowTests(unittest.TestCase):
    """Slow GPU tests for ConsistencyModelPipeline against the real cd_imagenet64_l2 checkpoint."""

    def tearDown(self):
        super().tearDown()
        # Free GPU memory between tests.
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, seed=0, get_fixed_latents=False, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Build deterministic pipeline call kwargs; optionally pin fixed latents."""
        generator = torch.manual_seed(seed)

        inputs = {
            "num_inference_steps": None,
            "timesteps": [22, 0],
            "class_labels": 0,
            "generator": generator,
            "output_type": "np",
        }

        if get_fixed_latents:
            latents = self.get_fixed_latents(seed=seed, device=device, dtype=dtype, shape=shape)
            inputs["latents"] = latents

        return inputs

    def get_fixed_latents(self, seed=0, device="cpu", dtype=torch.float16, shape=(1, 3, 64, 64)):
        """Sample a fixed latent tensor from a device-bound, seeded generator."""
        if type(device) == str:
            device = torch.device(device)
        generator = torch.Generator(device=device).manual_seed(seed)
        latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        return latents

    def test_consistency_model_cd_multistep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0888, 0.0881, 0.0666, 0.0479, 0.0292, 0.0195, 0.0201, 0.0163, 0.0254])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    def test_consistency_model_cd_onestep(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs()
        # Single-step sampling: one inference step, no explicit timestep schedule.
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.0340, 0.0152, 0.0063, 0.0267, 0.0221, 0.0107, 0.0416, 0.0186, 0.0217])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2

    @require_torch_a
    def test_consistency_model_cd_multistep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        # Fixed latents make the fp16 output comparable against the expected slice.
        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1875, 0.1428, 0.1289, 0.2151, 0.2092, 0.1477, 0.1877, 0.1641, 0.1353])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    @require_torch_a
    def test_consistency_model_cd_onestep_flash_attn(self):
        unet = UNetaDModel.from_pretrained("diffusers/consistency_models", subfolder="diffusers_cd_imagenet64_l2")
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40,
            sigma_min=0.002,
            sigma_max=80.0,
        )
        pipe = ConsistencyModelPipeline(unet=unet, scheduler=scheduler)
        pipe.to(torch_device=torch_device, torch_dtype=torch.float16)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(get_fixed_latents=True, device=torch_device)
        inputs["num_inference_steps"] = 1
        inputs["timesteps"] = None

        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=True, enable_math=False, enable_mem_efficient=False):
            image = pipe(**inputs).images
        assert image.shape == (1, 64, 64, 3)

        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.1663, 0.1948, 0.2275, 0.1680, 0.1204, 0.1245, 0.1858, 0.1338, 0.2095])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __magic_name__ ):
def __init__( self : int , a : List[str] , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = params
lowerCAmelCase__ : Union[str, Any] = np.array(a )
lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , a : List[str] ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.lengths )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size
lowerCAmelCase__ : Optional[int] = self.lengths > max_len
logger.info(f'''Splitting {sum(a )} too long sequences.''' )
def divide_chunks(a : List[str] , a : Tuple ):
return [l[i : i + n] for i in range(0 , len(a ) , a )]
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Union[str, Any] = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ : Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ : Dict = np.insert(a , 0 , a )
if sub_s[-1] != sep_id:
lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a )
assert len(a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a )
new_tok_ids.extend(a )
new_lengths.extend([len(a ) for l in sub_seqs] )
lowerCAmelCase__ : str = np.array(a )
lowerCAmelCase__ : Optional[Any] = np.array(a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = len(self )
lowerCAmelCase__ : List[Any] = self.lengths > 11
lowerCAmelCase__ : Dict = self.token_ids[indices]
lowerCAmelCase__ : Tuple = self.lengths[indices]
lowerCAmelCase__ : Any = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : str = len(self )
lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ : List[str] = self.token_ids[indices]
lowerCAmelCase__ : Optional[Any] = self.lengths[indices]
lowerCAmelCase__ : Union[str, Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self : int , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [t[0] for t in batch]
lowerCAmelCase__ : List[str] = [t[1] for t in batch]
assert len(a ) == len(a )
# Max for paddings
lowerCAmelCase__ : List[str] = max(a )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
assert len(tk_ ) == len(a )
assert all(len(a ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs)
return tk_t, lg_t | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> float:
lowerCAmelCase__ : Union[str, Any] = [redshift, radiation_density, matter_density, dark_energy]
if any(p < 0 for p in parameters ):
raise ValueError('All input parameters must be positive' )
if any(p > 1 for p in parameters[1:4] ):
raise ValueError('Relative densities cannot be greater than one' )
else:
lowerCAmelCase__ : List[Any] = 1 - (matter_density + radiation_density + dark_energy)
lowerCAmelCase__ : List[str] = (
radiation_density * (redshift + 1) ** 4
+ matter_density * (redshift + 1) ** 3
+ curvature * (redshift + 1) ** 2
+ dark_energy
)
lowerCAmelCase__ : Optional[Any] = hubble_constant * e_a ** (1 / 2)
return hubble
if __name__ == "__main__":
import doctest
# run doctest
doctest.testmod()
# demo LCDM approximation
lowerCamelCase__ = 0.3
print(
hubble_parameter(
hubble_constant=68.3,
radiation_density=1E-4,
matter_density=matter_density,
dark_energy=1 - matter_density,
redshift=0,
)
) | 69 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 69 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
lowerCamelCase__ = """Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"""
def lowerCAmelCase__ ( ) -> Optional[int]:
lowerCAmelCase__ : Dict = _ask_options(
'In which compute environment are you running?' , ['This machine', 'AWS (Amazon SageMaker)'] , _convert_compute_environment , )
if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
lowerCAmelCase__ : Dict = get_sagemaker_input()
else:
lowerCAmelCase__ : Tuple = get_cluster_input()
return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_=None ) -> Optional[int]:
if subparsers is not None:
lowerCAmelCase__ : Any = subparsers.add_parser('config' , description=SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Optional[Any] = argparse.ArgumentParser('Accelerate config command' , description=SCREAMING_SNAKE_CASE_ )
parser.add_argument(
'--config_file' , default=SCREAMING_SNAKE_CASE_ , help=(
'The path to use to store the config file. Will default to a file named default_config.yaml in the cache '
'location, which is the content of the environment `HF_HOME` suffixed with \'accelerate\', or if you don\'t have '
'such an environment variable, your cache directory (\'~/.cache\' or the content of `XDG_CACHE_HOME`) suffixed '
'with \'huggingface\'.'
) , )
if subparsers is not None:
parser.set_defaults(func=SCREAMING_SNAKE_CASE_ )
return parser
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : List[Any] = get_user_input()
if args.config_file is not None:
lowerCAmelCase__ : Union[str, Any] = args.config_file
else:
if not os.path.isdir(SCREAMING_SNAKE_CASE_ ):
os.makedirs(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = default_yaml_config_file
if config_file.endswith('.json' ):
config.to_json_file(SCREAMING_SNAKE_CASE_ )
else:
config.to_yaml_file(SCREAMING_SNAKE_CASE_ )
print(F'''accelerate configuration saved at {config_file}''' )
def lowerCAmelCase__ ( ) -> Union[str, Any]:
lowerCAmelCase__ : Optional[Any] = config_command_parser()
lowerCAmelCase__ : Tuple = parser.parse_args()
config_command(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
main() | 69 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = RemBertTokenizer
def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[Any] = keep_accents
lowerCAmelCase__ : Optional[Any] = vocab_file
lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from tokenizers import processors
from ...tokenization_utils import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_mbart import MBartTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.bpe.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""facebook/mbart-large-en-ro""": (
"""https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/sentencepiece.bpe.model"""
),
"""facebook/mbart-large-cc25""": (
"""https://huggingface.co/facebook/mbart-large-cc25/resolve/main/sentencepiece.bpe.model"""
),
},
"""tokenizer_file""": {
"""facebook/mbart-large-en-ro""": """https://huggingface.co/facebook/mbart-large-en-ro/resolve/main/tokenizer.json""",
"""facebook/mbart-large-cc25""": """https://huggingface.co/facebook/mbart-large-cc25/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""facebook/mbart-large-en-ro""": 1024,
"""facebook/mbart-large-cc25""": 1024,
}
# fmt: off
lowerCamelCase__ = ["""ar_AR""", """cs_CZ""", """de_DE""", """en_XX""", """es_XX""", """et_EE""", """fi_FI""", """fr_XX""", """gu_IN""", """hi_IN""", """it_IT""", """ja_XX""", """kk_KZ""", """ko_KR""", """lt_LT""", """lv_LV""", """my_MM""", """ne_NP""", """nl_XX""", """ro_RO""", """ru_RU""", """si_LK""", """tr_TR""", """vi_VN""", """zh_CN"""]
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = ['input_ids', 'attention_mask']
lowercase = MBartTokenizer
lowercase = []
lowercase = []
def __init__( self : int , a : Tuple=None , a : Optional[int]=None , a : int="<s>" , a : Optional[int]="</s>" , a : int="</s>" , a : Tuple="<s>" , a : Dict="<unk>" , a : int="<pad>" , a : List[Any]="<mask>" , a : Optional[int]=None , a : Optional[int]=None , a : List[Any]=None , **a : Union[str, Any] , ):
'''simple docstring'''
lowerCAmelCase__ : int = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
vocab_file=a , tokenizer_file=a , bos_token=a , eos_token=a , sep_token=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , src_lang=a , tgt_lang=a , additional_special_tokens=a , **a , )
lowerCAmelCase__ : Tuple = vocab_file
lowerCAmelCase__ : List[Any] = False if not self.vocab_file else True
lowerCAmelCase__ : int = FAIRSEQ_LANGUAGE_CODES.copy()
if additional_special_tokens is not None:
# Only add those special tokens if they are not already there.
_additional_special_tokens.extend(
[t for t in additional_special_tokens if t not in _additional_special_tokens] )
self.add_special_tokens({'additional_special_tokens': _additional_special_tokens} )
lowerCAmelCase__ : List[str] = {
lang_code: self.convert_tokens_to_ids(a ) for lang_code in FAIRSEQ_LANGUAGE_CODES
}
lowerCAmelCase__ : Optional[int] = src_lang if src_lang is not None else 'en_XX'
lowerCAmelCase__ : Union[str, Any] = self.convert_tokens_to_ids(self._src_lang )
lowerCAmelCase__ : str = tgt_lang
self.set_src_lang_special_tokens(self._src_lang )
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return self._src_lang
@src_lang.setter
def _lowerCamelCase ( self : Optional[int] , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = new_src_lang
self.set_src_lang_special_tokens(self._src_lang )
def _lowerCamelCase ( self : Any , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
if token_ids_a is None:
return self.prefix_tokens + token_ids_a + self.suffix_tokens
# We don't expect to process pairs, but leave the pair logic for API consistency
return self.prefix_tokens + token_ids_a + token_ids_a + self.suffix_tokens
def _lowerCamelCase ( self : int , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
lowerCAmelCase__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : str , a : Optional[str] , a : Optional[str] , **a : List[Any] ):
'''simple docstring'''
if src_lang is None or tgt_lang is None:
raise ValueError('Translation requires a `src_lang` and a `tgt_lang` for this model' )
lowerCAmelCase__ : Union[str, Any] = src_lang
lowerCAmelCase__ : Optional[Any] = self(a , add_special_tokens=a , return_tensors=a , **a )
lowerCAmelCase__ : List[str] = self.convert_tokens_to_ids(a )
lowerCAmelCase__ : List[Any] = tgt_lang_id
return inputs
def _lowerCamelCase ( self : Optional[Any] , a : List[str] , a : str = "en_XX" , a : Optional[List[str]] = None , a : str = "ro_RO" , **a : Tuple , ):
'''simple docstring'''
lowerCAmelCase__ : str = src_lang
lowerCAmelCase__ : Tuple = tgt_lang
return super().prepare_seqaseq_batch(a , a , **a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
return self.set_src_lang_special_tokens(self.src_lang )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return self.set_tgt_lang_special_tokens(self.tgt_lang )
def _lowerCamelCase ( self : Union[str, Any] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.convert_tokens_to_ids(a )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Union[str, Any] = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase__ : Union[str, Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase__ : List[Any] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase__ : Dict = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowerCamelCase ( self : Tuple , a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.convert_tokens_to_ids(a )
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Any = [self.eos_token_id, self.cur_lang_code]
lowerCAmelCase__ : Optional[Any] = self.convert_ids_to_tokens(self.prefix_tokens )
lowerCAmelCase__ : List[str] = self.convert_ids_to_tokens(self.suffix_tokens )
lowerCAmelCase__ : Optional[int] = processors.TemplateProcessing(
single=prefix_tokens_str + ['$A'] + suffix_tokens_str , pair=prefix_tokens_str + ['$A', '$B'] + suffix_tokens_str , special_tokens=list(zip(prefix_tokens_str + suffix_tokens_str , self.prefix_tokens + self.suffix_tokens ) ) , )
def _lowerCamelCase(self, save_directory: str, filename_prefix: Optional[str] = None):
    """Save the slow-tokenizer vocabulary file into ``save_directory``.

    Returns a one-tuple with the written vocab-file path, or ``None`` when
    ``save_directory`` is not a directory (an error is logged).

    Raises:
        ValueError: when the fast tokenizer lacks the underlying vocabulary
            file needed to rebuild a slow tokenizer.

    Fixes vs. original: the two parameters were both named ``a`` (a
    SyntaxError) and ``out_vocab_file`` was returned without ever being bound.
    """
    if not self.can_save_slow_tokenizer:
        raise ValueError(
            'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
            'tokenizer.' )
    if not os.path.isdir(save_directory):
        logger.error(f'''Vocabulary path ({save_directory}) should be a directory.''' )
        return
    out_vocab_file = os.path.join(
        save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
    # Only copy when source and destination differ, to avoid clobbering the
    # file in place when saving into its own directory.
    if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
        copyfile(self.vocab_file, out_vocab_file)
    return (out_vocab_file,)
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __magic_name__ ):
    """Dataset input stream that builds a dataset from a Python generator
    via the packaged `Generator` builder.

    Fixes vs. original: all seven ``__init__`` parameters were named ``a``
    (a SyntaxError), and the read method referenced the undefined names
    ``a`` and ``dataset``.
    """

    def __init__(self, generator: Callable, features: Optional[Features] = None, cache_dir: str = None, keep_in_memory: bool = False, streaming: bool = False, gen_kwargs: Optional[dict] = None, num_proc: Optional[int] = None, **kwargs):
        super().__init__(
            features=features, cache_dir=cache_dir, keep_in_memory=keep_in_memory, streaming=streaming, num_proc=num_proc, **kwargs,
        )
        # Keep the builder on self so the read method can materialize later.
        self.builder = Generator(
            cache_dir=cache_dir, features=features, generator=generator, gen_kwargs=gen_kwargs, **kwargs,
        )

    def _lowerCamelCase(self):
        """Build and return the dataset (streaming or map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split='train')
        # Build regular (map-style) dataset
        else:
            # Defaults: let download_and_prepare pick its own config/modes.
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None
            self.builder.download_and_prepare(
                download_config=download_config, download_mode=download_mode, verification_mode=verification_mode, base_path=base_path, num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split='train', verification_mode=verification_mode, in_memory=self.keep_in_memory)
        return dataset
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class A__ ( __magic_name__ ):
    """Config tester for SegformerConfig: asserts the Segformer-specific
    attributes exist on an instantiated config.

    Fixes vs. original: the ``hasattr`` checks probed the undefined name
    ``a`` instead of the config object that was just constructed.
    """

    def _lowerCamelCase(self):
        """Instantiate ``self.config_class`` and verify its expected fields."""
        config = self.config_class(**self.inputs_dict)
        for attr in ('hidden_sizes', 'num_attention_heads', 'num_encoder_blocks'):
            self.parent.assertTrue(hasattr(config, attr))
class A__ :
    """Test helper that builds Segformer configs and random inputs, then runs
    SegformerModel / SegformerForSemanticSegmentation and asserts output
    shapes and losses.

    NOTE(review): identifiers here are machine-mangled — every parameter is
    named ``a`` (duplicates are a SyntaxError), locals are all
    ``lowerCAmelCase__`` while reads use the original names, and several
    methods share the name ``_lowerCamelCase`` so later definitions shadow
    earlier ones.  Restore the real identifiers before executing.
    """

    def __init__( self : Dict , a : Optional[int] , a : Union[str, Any]=13 , a : Union[str, Any]=64 , a : Union[str, Any]=3 , a : List[Any]=4 , a : Optional[int]=[2, 2, 2, 2] , a : Union[str, Any]=[8, 4, 2, 1] , a : Tuple=[16, 32, 64, 128] , a : Any=[1, 4, 8, 16] , a : Any=[1, 2, 4, 8] , a : Dict=True , a : List[str]=True , a : Dict="gelu" , a : Dict=0.1 , a : int=0.1 , a : Dict=0.0_2 , a : Union[str, Any]=3 , a : List[str]=None , ):
        """Record the parent test case and all Segformer hyper-parameters."""
        lowerCAmelCase__ : List[Any] = parent
        lowerCAmelCase__ : List[Any] = batch_size
        lowerCAmelCase__ : Optional[Any] = image_size
        lowerCAmelCase__ : str = num_channels
        lowerCAmelCase__ : int = num_encoder_blocks
        lowerCAmelCase__ : List[str] = sr_ratios
        lowerCAmelCase__ : List[Any] = depths
        lowerCAmelCase__ : str = hidden_sizes
        lowerCAmelCase__ : int = downsampling_rates
        lowerCAmelCase__ : str = num_attention_heads
        lowerCAmelCase__ : Optional[int] = is_training
        lowerCAmelCase__ : Union[str, Any] = use_labels
        lowerCAmelCase__ : Optional[int] = hidden_act
        lowerCAmelCase__ : Optional[int] = hidden_dropout_prob
        lowerCAmelCase__ : str = attention_probs_dropout_prob
        lowerCAmelCase__ : Optional[Any] = initializer_range
        lowerCAmelCase__ : Dict = num_labels
        lowerCAmelCase__ : Dict = scope

    def _lowerCamelCase ( self : Dict ):
        """Create random pixel values (and, when enabled, per-pixel labels)
        plus a matching config."""
        lowerCAmelCase__ : List[Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        lowerCAmelCase__ : Optional[Any] = None
        if self.use_labels:
            # Segmentation labels: one class id in [0, num_labels) per pixel.
            lowerCAmelCase__ : List[str] = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        lowerCAmelCase__ : Optional[int] = self.get_config()
        return config, pixel_values, labels

    def _lowerCamelCase ( self : Any ):
        """Build a SegformerConfig from the stored hyper-parameters."""
        return SegformerConfig(
            image_size=self.image_size , num_channels=self.num_channels , num_encoder_blocks=self.num_encoder_blocks , depths=self.depths , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , )

    def _lowerCamelCase ( self : Any , a : str , a : str , a : int ):
        """Run the bare SegformerModel and check the last hidden-state shape."""
        lowerCAmelCase__ : Optional[Any] = SegformerModel(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Optional[int] = model(a )
        # Final feature map is downsampled by the last stage rate times two.
        lowerCAmelCase__ : Dict = self.image_size // (self.downsampling_rates[-1] * 2)
        self.parent.assertEqual(
            result.last_hidden_state.shape , (self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )

    def _lowerCamelCase ( self : Dict , a : List[str] , a : List[str] , a : Dict ):
        """Semantic segmentation with and without labels: logits come out at
        one quarter of the input resolution; loss must be positive."""
        lowerCAmelCase__ : Optional[Any] = self.num_labels
        lowerCAmelCase__ : Union[str, Any] = SegformerForSemanticSegmentation(a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Tuple = model(a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
        self.parent.assertEqual(
            result.logits.shape , (self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
        self.parent.assertGreater(result.loss , 0.0 )

    def _lowerCamelCase ( self : str , a : Union[str, Any] , a : int , a : int ):
        """Binary (single-label) segmentation: the loss must still be positive."""
        lowerCAmelCase__ : Optional[int] = 1
        lowerCAmelCase__ : List[Any] = SegformerForSemanticSegmentation(config=a )
        model.to(a )
        model.eval()
        lowerCAmelCase__ : Any = torch.randint(0 , 1 , (self.batch_size, self.image_size, self.image_size) ).to(a )
        lowerCAmelCase__ : List[str] = model(a , labels=a )
        self.parent.assertGreater(result.loss , 0.0 )

    def _lowerCamelCase ( self : Optional[Any] ):
        """Split prepared config/inputs into (config, inputs_dict) for the
        shared model-test harness."""
        lowerCAmelCase__ : Any = self.prepare_config_and_inputs()
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = config_and_inputs
        lowerCAmelCase__ : Dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common-harness tests for Segformer models: config sanity, forward
    signatures, attention/hidden-state output shapes, and training.

    NOTE(review): identifiers are machine-mangled — the repeated ``lowercase``
    class attributes shadow one another (they were distinct harness settings
    such as the model/pipeline mappings and test flags), locals are all
    ``lowerCAmelCase__`` while reads use the original names, and all test
    methods collide on ``_lowerCamelCase``.  Restore real identifiers before
    executing.
    """

    # Model classes exercised by the common harness (torch builds only).
    lowercase = (
        (
            SegformerModel,
            SegformerForSemanticSegmentation,
            SegformerForImageClassification,
        )
        if is_torch_available()
        else ()
    )
    # Pipeline-task → model-class mapping for pipeline tests.
    lowercase = (
        {
            'feature-extraction': SegformerModel,
            'image-classification': SegformerForImageClassification,
            'image-segmentation': SegformerForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )
    # Harness feature flags (originally distinct booleans; names mangled).
    lowercase = True
    lowercase = False
    lowercase = False
    lowercase = False

    def _lowerCamelCase ( self : Dict ):
        """Set up the model tester and config tester for each test."""
        lowerCAmelCase__ : Optional[Any] = SegformerModelTester(self )
        lowerCAmelCase__ : Optional[int] = SegformerConfigTester(self , config_class=a )

    def _lowerCamelCase ( self : Dict ):
        """Run the shared SegformerConfig sanity checks."""
        self.config_tester.run_common_tests()

    def _lowerCamelCase ( self : Tuple ):
        """Base model forward pass and output-shape check."""
        lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*a )

    def _lowerCamelCase ( self : str ):
        """Single-label (binary) segmentation check."""
        lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_binary_image_segmentation(*a )

    def _lowerCamelCase ( self : Optional[Any] ):
        """Multi-label semantic segmentation check."""
        lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_segmentation(*a )

    @unittest.skip('SegFormer does not use inputs_embeds' )
    def _lowerCamelCase ( self : List[Any] ):
        """Skipped: inputs_embeds is not supported by SegFormer."""
        pass

    @unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
    def _lowerCamelCase ( self : List[Any] ):
        """Skipped: no input/output embedding accessors on SegFormer."""
        pass

    def _lowerCamelCase ( self : str ):
        """The forward signature's first argument must be `pixel_values`."""
        lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : str = model_class(a )
            lowerCAmelCase__ : int = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
            lowerCAmelCase__ : Any = ['pixel_values']
            self.assertListEqual(arg_names[:1] , a )

    def _lowerCamelCase ( self : List[str] ):
        """Attention outputs: count equals sum of depths; first/last attention
        maps have the expected (possibly spatially reduced) sequence lengths,
        both when requested per-call and via the config."""
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : str = True
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : Union[str, Any] = True
            lowerCAmelCase__ : int = False
            lowerCAmelCase__ : List[str] = True
            lowerCAmelCase__ : Dict = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Optional[int] = outputs.attentions
            # One attention map per transformer layer across all blocks.
            lowerCAmelCase__ : Union[str, Any] = sum(self.model_tester.depths )
            self.assertEqual(len(a ) , a )
            # check that output_attentions also work using config
            del inputs_dict["output_attentions"]
            lowerCAmelCase__ : Union[str, Any] = True
            lowerCAmelCase__ : List[Any] = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : str = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Union[str, Any] = outputs.attentions
            self.assertEqual(len(a ) , a )
            # verify the first attentions (first block, first layer)
            lowerCAmelCase__ : int = (self.model_tester.image_size // 4) ** 2
            lowerCAmelCase__ : int = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )
            # verify the last attentions (last block, last layer)
            lowerCAmelCase__ : int = (self.model_tester.image_size // 32) ** 2
            lowerCAmelCase__ : List[str] = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
            self.assertListEqual(
                list(attentions[-1].shape[-3:] ) , [self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] , )
            lowerCAmelCase__ : List[Any] = len(a )
            # Check attention is always last and order is fine
            lowerCAmelCase__ : Optional[Any] = True
            lowerCAmelCase__ : Any = True
            lowerCAmelCase__ : Tuple = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : Any = model(**self._prepare_for_class(a , a ) )
            self.assertEqual(out_len + 1 , len(a ) )
            lowerCAmelCase__ : List[str] = outputs.attentions
            self.assertEqual(len(a ) , a )
            # verify the first attentions (first block, first layer)
            lowerCAmelCase__ : str = (self.model_tester.image_size // 4) ** 2
            lowerCAmelCase__ : Tuple = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
            self.assertListEqual(
                list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] , )

    def _lowerCamelCase ( self : Any ):
        """Hidden-state outputs: one per encoder block, first at 1/4 resolution,
        requested per-call and via the config."""
        def check_hidden_states_output(a : List[str] , a : List[str] , a : List[str] ):
            lowerCAmelCase__ : Union[str, Any] = model_class(a )
            model.to(a )
            model.eval()
            with torch.no_grad():
                lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
            lowerCAmelCase__ : Any = outputs.hidden_states
            lowerCAmelCase__ : Optional[int] = self.model_tester.num_encoder_blocks
            self.assertEqual(len(a ) , a )
            # verify the first hidden states (first block)
            self.assertListEqual(
                list(hidden_states[0].shape[-3:] ) , [
                    self.model_tester.hidden_sizes[0],
                    self.model_tester.image_size // 4,
                    self.model_tester.image_size // 4,
                ] , )
        lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            lowerCAmelCase__ : int = True
            check_hidden_states_output(a , a , a )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            lowerCAmelCase__ : Dict = True
            check_hidden_states_output(a , a , a )

    def _lowerCamelCase ( self : Union[str, Any] ):
        """Training smoke test: forward + backward must run for trainable
        classes (classes in the base-model mapping are skipped)."""
        if not self.model_tester.is_training:
            return
        lowerCAmelCase__ , lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs_for_common()
        lowerCAmelCase__ : str = True
        for model_class in self.all_model_classes:
            if model_class in get_values(a ):
                continue
            lowerCAmelCase__ : Tuple = model_class(a )
            model.to(a )
            model.train()
            lowerCAmelCase__ : Any = self._prepare_for_class(a , a , return_labels=a )
            lowerCAmelCase__ : Optional[Any] = model(**a ).loss
            loss.backward()

    @unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
    def _lowerCamelCase ( self : int ):
        """Skipped pending a smaller common-test model."""
        pass

    @slow
    def _lowerCamelCase ( self : int ):
        """Loading the first published checkpoint must succeed."""
        for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            lowerCAmelCase__ : List[str] = SegformerModel.from_pretrained(a )
            self.assertIsNotNone(a )
def lowerCAmelCase__ ( ):
    """Load the COCO test-fixture image used by the slow integration tests.

    Fixes vs. original: the opened image was bound to a throwaway local while
    ``return image`` referenced an undefined name (NameError); the bogus
    ``-> Optional[int]`` annotation is dropped (a PIL image is returned).
    """
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
class A__ ( unittest.TestCase ):
    """Slow integration tests: run published Segformer checkpoints on the COCO
    fixture image and compare logits against recorded slices.

    NOTE(review): identifiers are machine-mangled — locals are all
    ``lowerCAmelCase__`` while later reads use the original names, the three
    test methods collide on ``_lowerCamelCase``, and several call arguments
    reference the undefined name ``a``.  Restore real identifiers before
    executing.
    """

    @slow
    def _lowerCamelCase ( self : str ):
        """ADE20k b0 checkpoint: logits shape and a 3x3x3 slice match."""
        lowerCAmelCase__ : Union[str, Any] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
        lowerCAmelCase__ : Optional[Any] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
            a )
        lowerCAmelCase__ : str = prepare_img()
        lowerCAmelCase__ : Tuple = image_processor(images=a , return_tensors='pt' )
        lowerCAmelCase__ : List[str] = encoded_inputs.pixel_values.to(a )
        with torch.no_grad():
            lowerCAmelCase__ : Optional[Any] = model(a )
        lowerCAmelCase__ : Union[str, Any] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , a )
        # Reference slice recorded from the original implementation.
        lowerCAmelCase__ : List[Any] = torch.tensor(
            [
                [[-4.6_3_1_0, -5.5_2_3_2, -6.2_3_5_6], [-5.1_9_2_1, -6.1_4_4_4, -6.5_9_9_6], [-5.4_4_2_4, -6.2_7_9_0, -6.7_5_7_4]],
                [[-1_2.1_3_9_1, -1_3.3_1_2_2, -1_3.9_5_5_4], [-1_2.8_7_3_2, -1_3.9_3_5_2, -1_4.3_5_6_3], [-1_2.9_4_3_8, -1_3.8_2_2_6, -1_4.2_5_1_3]],
                [[-1_2.5_1_3_4, -1_3.4_6_8_6, -1_4.4_9_1_5], [-1_2.8_6_6_9, -1_4.4_3_4_3, -1_4.7_7_5_8], [-1_3.2_5_2_3, -1_4.5_8_1_9, -1_5.0_6_9_4]],
            ] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1E-4 ) )

    @slow
    def _lowerCamelCase ( self : Optional[int] ):
        """Cityscapes b1 checkpoint: logits shape and slice match (looser tol)."""
        lowerCAmelCase__ : List[str] = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
        lowerCAmelCase__ : int = SegformerForSemanticSegmentation.from_pretrained(
            'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(a )
        lowerCAmelCase__ : Dict = prepare_img()
        lowerCAmelCase__ : List[Any] = image_processor(images=a , return_tensors='pt' )
        lowerCAmelCase__ : Any = encoded_inputs.pixel_values.to(a )
        with torch.no_grad():
            lowerCAmelCase__ : Optional[Any] = model(a )
        lowerCAmelCase__ : Optional[Any] = torch.Size((1, model.config.num_labels, 128, 128) )
        self.assertEqual(outputs.logits.shape , a )
        lowerCAmelCase__ : Optional[Any] = torch.tensor(
            [
                [[-1_3.5_7_4_8, -1_3.9_1_1_1, -1_2.6_5_0_0], [-1_4.3_5_0_0, -1_5.3_6_8_3, -1_4.2_3_2_8], [-1_4.7_5_3_2, -1_6.0_4_2_4, -1_5.6_0_8_7]],
                [[-1_7.1_6_5_1, -1_5.8_7_2_5, -1_2.9_6_5_3], [-1_7.2_5_8_0, -1_7.3_7_1_8, -1_4.8_2_2_3], [-1_6.6_0_5_8, -1_6.8_7_8_3, -1_6.7_4_5_2]],
                [[-3.6_4_5_6, -3.0_2_0_9, -1.4_2_0_3], [-3.0_7_9_7, -3.1_9_5_9, -2.0_0_0_0], [-1.8_7_5_7, -1.9_2_1_7, -1.6_9_9_7]],
            ] ).to(a )
        self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] , a , atol=1E-1 ) )

    @slow
    def _lowerCamelCase ( self : Optional[Any] ):
        """Post-processing: semantic maps resize to the requested target size
        (or default to the logits resolution)."""
        lowerCAmelCase__ : str = SegformerImageProcessor(
            image_scale=(512, 512) , keep_ratio=a , align=a , do_random_crop=a )
        lowerCAmelCase__ : Optional[int] = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
            a )
        lowerCAmelCase__ : Tuple = prepare_img()
        lowerCAmelCase__ : Dict = image_processor(images=a , return_tensors='pt' )
        lowerCAmelCase__ : Any = encoded_inputs.pixel_values.to(a )
        with torch.no_grad():
            lowerCAmelCase__ : List[Any] = model(a )
        lowerCAmelCase__ : Dict = outputs.logits.detach().cpu()
        lowerCAmelCase__ : int = image_processor.post_process_semantic_segmentation(outputs=a , target_sizes=[(500, 300)] )
        lowerCAmelCase__ : List[Any] = torch.Size((500, 300) )
        self.assertEqual(segmentation[0].shape , a )
        lowerCAmelCase__ : Union[str, Any] = image_processor.post_process_semantic_segmentation(outputs=a )
        lowerCAmelCase__ : Any = torch.Size((128, 128) )
        self.assertEqual(segmentation[0].shape , a )
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module-level logger (HF convention: namespaced by module path).
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
    """Audio feature extractor: converts raw mono speech into padded log-mel
    spectrogram patches (`audio_values`) plus a patch-level `audio_mask`.

    NOTE(review): identifiers are machine-mangled — signature parameters are
    all named ``a`` (duplicates are a SyntaxError) and locals are all
    ``lowerCAmelCase__`` while later reads use the original names (e.g.
    ``is_batched``, ``audio_features``).  Restore real identifiers before
    executing.
    """

    # Names of the model-input tensors this extractor produces.
    lowercase = ['audio_values', 'audio_mask']

    def __init__( self : Dict , a : Dict=2_048 , a : Optional[Any]=1 , a : List[Any]=[16, 16] , a : Dict=128 , a : List[str]=44_100 , a : Union[str, Any]=86 , a : Optional[Any]=2_048 , a : List[Any]=0.0 , **a : Tuple , ):
        """Store spectrogram/patching settings and precompute the slaney-norm
        mel filter bank used by the fbank extraction."""
        super().__init__(
            feature_size=a , sampling_rate=a , padding_value=a , **a , )
        lowerCAmelCase__ : Optional[Any] = spectrogram_length
        lowerCAmelCase__ : str = num_channels
        lowerCAmelCase__ : Tuple = patch_size
        # Number of patches along the frequency axis.
        lowerCAmelCase__ : Optional[int] = feature_size // self.patch_size[1]
        lowerCAmelCase__ : Union[str, Any] = n_fft
        lowerCAmelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
        lowerCAmelCase__ : int = sampling_rate
        lowerCAmelCase__ : Union[str, Any] = padding_value
        lowerCAmelCase__ : Dict = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T

    def _lowerCamelCase ( self : Optional[int] , a : np.array ):
        """Compute a dB-scaled log-mel spectrogram of one waveform, shifted by
        -20 dB and rescaled into roughly [-1, 1]."""
        lowerCAmelCase__ : Optional[int] = spectrogram(
            a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
        # Drop the last frame, then normalize the dB values.
        lowerCAmelCase__ : Any = log_spec[:, :-1]
        lowerCAmelCase__ : Dict = log_spec - 2_0.0
        lowerCAmelCase__ : Tuple = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
        return log_spec

    def __call__( self : str , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : int , ):
        """Featurize one or more raw mono waveforms into padded log-mel
        spectrograms and (optionally) a patch attention mask, returned as a
        BatchFeature."""
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )
        # A 2-D numpy array is a batch of mono waveforms; 3-D would be
        # multi-channel, which is unsupported.
        lowerCAmelCase__ : Dict = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        lowerCAmelCase__ : List[Any] = is_batched_numpy or (
            isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
        )
        if is_batched:
            lowerCAmelCase__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(a , np.ndarray ):
            lowerCAmelCase__ : Optional[Any] = np.asarray(a , dtype=np.floataa )
        elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
            lowerCAmelCase__ : str = raw_speech.astype(np.floataa )
        # always return batch
        if not is_batched:
            lowerCAmelCase__ : Tuple = [np.asarray([raw_speech] ).T]
        # Convert audio signals to log mel spectrograms, truncate by time axis
        lowerCAmelCase__ : int = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0] , a ):
            lowerCAmelCase__ : Optional[Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
        # Create audio attention mask
        lowerCAmelCase__ : Optional[Any] = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            # 1 for real patches, 0 for padding, per example.
            lowerCAmelCase__ : Any = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            lowerCAmelCase__ : List[Any] = np.array(a ).astype(np.floataa )
        # convert into correct format for padding
        lowerCAmelCase__ : int = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        lowerCAmelCase__ : Dict = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
        lowerCAmelCase__ : Optional[Any] = padded_audio_features * self.padding_value
        for i in range(len(a ) ):
            lowerCAmelCase__ : Tuple = audio_features[i]
            lowerCAmelCase__ : List[str] = feature
        # return as BatchFeature
        if return_attention_mask:
            lowerCAmelCase__ : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            lowerCAmelCase__ : Any = {'audio_values': padded_audio_features}
        lowerCAmelCase__ : Any = BatchFeature(data=a , tensor_type=a )
        return encoded_inputs
import argparse
from collections import defaultdict
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : int = F'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(SCREAMING_SNAKE_CASE_ , 'r' ) as f:
lowerCAmelCase__ : Optional[Any] = f.readlines()
lowerCAmelCase__ : List[str] = F'''class {class_name}('''
lowerCAmelCase__ : Union[str, Any] = F'''{4 * " "}def {test_name}('''
lowerCAmelCase__ : List[str] = F'''{8 * " "}{correct_line.split()[0]}'''
lowerCAmelCase__ : Optional[Any] = F'''{16 * " "}{correct_line.split()[0]}'''
lowerCAmelCase__ : Any = False
lowerCAmelCase__ : Tuple = False
lowerCAmelCase__ : List[Any] = False
lowerCAmelCase__ : List[str] = False
lowerCAmelCase__ : Dict = 0
lowerCAmelCase__ : List[str] = 0
lowerCAmelCase__ : Tuple = []
for line in lines:
if line.startswith(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Any = True
elif in_class and line.startswith(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Any = True
elif in_class and in_func and (line.startswith(SCREAMING_SNAKE_CASE_ ) or line.startswith(SCREAMING_SNAKE_CASE_ )):
lowerCAmelCase__ : Optional[Any] = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
lowerCAmelCase__ : Tuple = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
lowerCAmelCase__ : Optional[Any] = True
if in_class and in_func and in_line and insert_line:
new_lines.append(F'''{spaces * " "}{correct_line}''' )
lowerCAmelCase__ : Dict = False
else:
new_lines.append(SCREAMING_SNAKE_CASE_ )
with open(SCREAMING_SNAKE_CASE_ , 'w' ) as f:
for line in new_lines:
f.write(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__(correct_filename: str, fail_filename: str = None) -> None:
    """Apply expected-value rewrites from ``correct_filename``.

    Each line of the correct file is ``file;class;test;correct_line``.  When
    ``fail_filename`` is given, only tests listed there (one
    ``file::class::test`` per line) are rewritten.

    Fixes vs. original: both parameters were named ``SCREAMING_SNAKE_CASE_``
    (a SyntaxError) and every local was bound to a mangled name while reads
    used the real ones.
    """
    if fail_filename is not None:
        with open(fail_filename, 'r') as f:
            test_failures = {l.strip() for l in f.readlines()}
    else:
        test_failures = None
    with open(correct_filename, 'r') as f:
        correct_lines = f.readlines()
    done_tests = defaultdict(int)
    for line in correct_lines:
        file, class_name, test_name, correct_line = line.split(';')
        if test_failures is None or "::".join([file, class_name, test_name]) in test_failures:
            # NOTE(review): `overwrite_file` should be the rewriter defined
            # above — confirm its name once the module is de-mangled.
            overwrite_file(file, class_name, test_name, correct_line, done_tests)
if __name__ == "__main__":
    # CLI entry point: --correct_filename holds `file;class;test;line`
    # records; optional --fail_filename restricts rewriting to listed tests.
    # NOTE(review): the `parser`, `args`, and `main` names read below do not
    # match the mangled assignments/definitions above — verify naming once
    # the module is de-mangled.
    lowerCamelCase__ = argparse.ArgumentParser()
    parser.add_argument("""--correct_filename""", help="""filename of tests with expected result""")
    parser.add_argument("""--fail_filename""", help="""filename of test failures""", type=str, default=None)
    lowerCamelCase__ = parser.parse_args()
    main(args.correct_filename, args.fail_filename)
import unittest
from transformers import DonutProcessor
# Hub checkpoint used to load the DonutProcessor under test.
lowerCamelCase__ = """naver-clova-ix/donut-base"""
class A__ ( unittest.TestCase ):
    """Tests DonutProcessor's decoding of a tagged token sequence into JSON.

    Fixes vs. original: the setup method and the test method both had the
    mangled name ``_lowerCamelCase``, so the setup was shadowed and
    ``self.processor`` was never created; both methods also referenced the
    undefined name ``a``.
    """

    def setUp(self):
        # Renamed to unittest's setUp hook so it actually runs before the
        # test; loads the processor from the module-level checkpoint name.
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def _lowerCamelCase(self):
        """A flat + nested tag sequence must decode to the matching dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        # NOTE(review): `tokenajson` mirrors the mangled source; the public
        # DonutProcessor API is `token2json` — confirm when de-mangling.
        actual_json = self.processor.tokenajson(sequence)
        self.assertDictEqual(actual_json, expected_json)
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , a : Optional[int] , a : Union[str, Any]=13 , a : List[Any]=7 , a : Optional[int]=True , a : Dict=True , a : int=True , a : Tuple=True , a : Any=99 , a : Tuple=32 , a : List[str]=5 , a : Dict=4 , a : Dict=37 , a : Union[str, Any]="gelu" , a : Union[str, Any]=0.1 , a : Tuple=0.1 , a : Optional[Any]=512 , a : Optional[Any]=16 , a : Tuple=2 , a : List[Any]=0.0_2 , a : Optional[int]=False , a : int=True , a : Union[str, Any]="None" , a : List[Any]=3 , a : int=4 , a : List[str]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : int = seq_length
lowerCAmelCase__ : str = is_training
lowerCAmelCase__ : Union[str, Any] = use_input_mask
lowerCAmelCase__ : int = use_token_type_ids
lowerCAmelCase__ : Optional[int] = use_labels
lowerCAmelCase__ : List[str] = vocab_size
lowerCAmelCase__ : Dict = hidden_size
lowerCAmelCase__ : Optional[int] = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : Optional[Any] = intermediate_size
lowerCAmelCase__ : List[str] = hidden_act
lowerCAmelCase__ : Union[str, Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Any = max_position_embeddings
lowerCAmelCase__ : int = type_vocab_size
lowerCAmelCase__ : Optional[int] = type_sequence_label_size
lowerCAmelCase__ : int = initializer_range
lowerCAmelCase__ : Any = num_labels
lowerCAmelCase__ : Optional[Any] = num_choices
lowerCAmelCase__ : str = relative_attention
lowerCAmelCase__ : int = position_biased_input
lowerCAmelCase__ : List[str] = pos_att_type
lowerCAmelCase__ : Tuple = scope
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : List[Any] = None
if self.use_input_mask:
lowerCAmelCase__ : int = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = self.get_config()
lowerCAmelCase__ : List[str] = 300
return config
def _lowerCamelCase ( self : str , a : Tuple ):
'''simple docstring'''
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def _lowerCamelCase ( self : List[str] , a : Optional[Any] , a : str , a : int , a : Tuple , a : Any , a : Tuple , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = DebertaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a , attention_mask=a , token_type_ids=a )[0]
lowerCAmelCase__ : Optional[Any] = model(a , token_type_ids=a )[0]
lowerCAmelCase__ : Union[str, Any] = model(a )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def _lowerCamelCase ( self : str , a : Dict , a : int , a : List[str] , a : Tuple , a : str , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = DebertaForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , a : Optional[Any] , a : List[str] , a : Tuple , a : List[str] , a : Any , a : Any , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Optional[Any] = DebertaForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(a )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Any , a : List[Any] , a : Any , a : Union[str, Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.num_labels
lowerCAmelCase__ : str = DebertaForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , token_type_ids=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : List[str] , a : List[Any] , a : List[str] , a : Dict , a : Tuple , a : Optional[int] , a : List[Any] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = DebertaForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Tuple = model(
a , attention_mask=a , token_type_ids=a , start_positions=a , end_positions=a , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : str = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : List[str] = config_and_inputs
lowerCAmelCase__ : str = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
    """Common + pipeline tests for DeBERTa.

    Bug fixes: the obfuscation collapsed every class attribute onto the single
    name ``lowercase`` (so only the last binding survived) and every method
    onto ``_lowerCamelCase`` (so unittest discovered only the last one and
    ``setUp`` never ran). Attribute and method names are restored to the
    upstream tester conventions; bodies now bind the names they later read.
    """

    all_model_classes = (
        (
            DebertaModel,
            DebertaForMaskedLM,
            DebertaForSequenceClassification,
            DebertaForTokenClassification,
            DebertaForQuestionAnswering,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': DebertaModel,
            'fill-mask': DebertaForMaskedLM,
            'question-answering': DebertaForQuestionAnswering,
            'text-classification': DebertaForSequenceClassification,
            'token-classification': DebertaForTokenClassification,
            'zero-shot': DebertaForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
    # NOTE(review): flag names restored from the upstream DebertaModelTest —
    # the original values (True, False, False, False, False) are preserved.
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False

    def setUp(self):
        self.model_tester = DebertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DebertaConfig, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
@require_sentencepiece
@require_tokenizers
class A__ ( unittest.TestCase ):
    """Integration tests for DeBERTa against hard-coded reference activations.

    Bug fixes: both methods previously shared the name ``_lowerCamelCase``
    (the second shadowed the first), and the test body read the undefined
    names ``model``, ``a`` and ``output``.
    """

    @unittest.skip(reason='Model not available yet')
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained('microsoft/deberta-base')
        input_ids = torch.tensor([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]])
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]])
        with torch.no_grad():
            output = model(input_ids, attention_mask=attention_mask)[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5_9_8_6, -0.8_0_5_5, -0.8_4_6_2], [1.4_4_8_4, -0.9_3_4_8, -0.8_0_5_9], [0.3_1_2_3, 0.0_0_3_2, -1.4_1_3_1]]]
        )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4], expected_slice, atol=1E-4), f'''{output[:, 1:4, 1:4]}''')
from numpy import exp, pi, sqrt
def lowerCAmelCase__(x, mu=0.0, sigma=1.0) -> float:
    """Return the value of the Gaussian (normal) probability density with
    mean ``mu`` and standard deviation ``sigma`` evaluated at ``x``.

    Bug fixes: the original signature repeated one parameter name three
    times (a SyntaxError) while the body read ``x``/``mu``/``sigma``; the
    return annotation was also wrong (the PDF is a float, not an int).

    >>> round(lowerCAmelCase__(0), 6)
    0.398942
    """
    return 1 / sqrt(2 * pi * sigma**2) * exp(-((x - mu) ** 2) / (2 * sigma**2))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
def prefix_function(input_string) -> list:
    """Compute the Knuth–Morris–Pratt prefix function: for each position i,
    the length of the longest proper prefix of input_string[:i+1] that is
    also a suffix of it.

    Bug fixes: restored the name ``prefix_function`` (called by the helper
    below) and re-bound ``prefix_result``/``j``, which the original assigned
    to a throwaway local and then read as undefined names.
    """
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result
def lowerCAmelCase__(SCREAMING_SNAKE_CASE_) -> int:
    """Return the length of the longest border (proper prefix that is also a
    suffix) over all prefixes of the string.

    Robustness fix: ``max`` on an empty list raises ValueError, so the empty
    string now returns 0 instead of crashing.
    """
    if not SCREAMING_SNAKE_CASE_:
        return 0
    return max(prefix_function(SCREAMING_SNAKE_CASE_))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
    """Tokenizer tests for XLM over a tiny hand-built BPE vocabulary.

    Bug fixes: both class attributes were bound to the same name
    ``lowercase`` (only the last survived) — restored to the mixin's
    ``tokenizer_class``/``test_rust_tokenizer``; all methods shared the name
    ``_lowerCamelCase`` (shadowing each other, and ``setUp`` never ran) —
    restored to their upstream names; bodies now bind the names they read.
    """

    tokenizer_class = XLMTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        vocab_tokens = dict(zip(vocab, range(len(vocab))))
        merges = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['merges_file'])
        with open(self.vocab_file, 'w') as fp:
            fp.write(json.dumps(vocab_tokens))
        with open(self.merges_file, 'w') as fp:
            fp.write('\n'.join(merges))

    def get_input_output_texts(self, tokenizer):
        """Provide a (raw, expected) text pair for the common tokenizer tests."""
        input_text = 'lower newer'
        output_text = 'lower newer'
        return input_text, output_text

    def test_full_tokenizer(self):
        tokenizer = XLMTokenizer(self.vocab_file, self.merges_file)
        text = 'lower'
        bpe_tokens = ['low', 'er</w>']
        tokens = tokenizer.tokenize(text)
        self.assertListEqual(tokens, bpe_tokens)
        input_tokens = tokens + ['<unk>']
        input_bpe_tokens = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)

    @slow
    def test_sequence_builders(self):
        tokenizer = XLMTokenizer.from_pretrained('xlm-mlm-en-2048')
        # NOTE(review): upstream encodes with add_special_tokens=False since the
        # special tokens are added explicitly by build_inputs_with_special_tokens.
        text = tokenizer.encode('sequence builders', add_special_tokens=False)
        text_a = tokenizer.encode('multi-sequence build', add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
import argparse
import collections
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import TaConfig, TaEncoderModel, TaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def tax_attention_lookup(params, i, prefix, layer_name="attention"):
    """Return the (k, o, q, v) projection kernels for attention block ``i``
    under ``prefix`` in a flattened T5X parameter dict.

    Bug fixes: the original repeated one parameter name four times (a
    SyntaxError) and returned ``k, o, q, v`` without ever binding them; the
    name is restored to ``tax_attention_lookup`` as used at the call sites.
    """
    k = params[f'''{prefix}/layers_{i}/{layer_name}/key/kernel''']
    o = params[f'''{prefix}/layers_{i}/{layer_name}/out/kernel''']
    q = params[f'''{prefix}/layers_{i}/{layer_name}/query/kernel''']
    v = params[f'''{prefix}/layers_{i}/{layer_name}/value/kernel''']
    return k, o, q, v
def tax_mlp_lookup(params, i, prefix, split_mlp_wi=False):
    """Return the (wi, wo) MLP kernels for block ``i`` under ``prefix``.

    When ``split_mlp_wi`` is True (T5 v1.1 gated GeLU), ``wi`` is the pair
    ``(wi_0, wi_1)``; otherwise it is the single ``wi`` kernel.

    Bug fixes: duplicate parameter names (SyntaxError) removed, locals
    re-bound, and the name restored to ``tax_mlp_lookup`` per the call sites.
    """
    if split_mlp_wi:
        wi_a = params[f'''{prefix}/layers_{i}/mlp/wi_0/kernel''']
        wi_a_a = params[f'''{prefix}/layers_{i}/mlp/wi_1/kernel''']
        wi = (wi_a, wi_a_a)
    else:
        wi = params[f'''{prefix}/layers_{i}/mlp/wi/kernel''']
    wo = params[f'''{prefix}/layers_{i}/mlp/wo/kernel''']
    return wi, wo
def tax_layer_norm_lookup(params, i, prefix, layer_name):
    """Return the layer-norm scale for ``layer_name`` of block ``i`` under
    ``prefix``.

    Bug fixes: duplicate parameter names (SyntaxError) removed; name restored
    to ``tax_layer_norm_lookup`` as used at the call sites below.
    """
    return params[f'''{prefix}/layers_{i}/{layer_name}/scale''']
def convert_tax_to_pytorch(variables, *, num_layers, is_encoder_only):
    """Convert a flattened T5X parameter tree into an ordered dict keyed by
    the PyTorch T5 state-dict layout.

    Bug fixes: the original had duplicate (keyword-only) parameter names — a
    SyntaxError — and every converted tensor was bound to a throwaway local
    instead of being written into the output dict. Keys below are restored
    from the upstream convert_t5x_checkpoint_to_pytorch script — verify
    against upstream before relying on exact key names.
    """
    old = traverse_util.flatten_dict(variables['target'])
    old = {'/'.join(k): v for k, v in old.items()}
    # v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
    split_mlp_wi = 'encoder/layers_0/mlp/wi_0/kernel' in old
    print('Split MLP:', split_mlp_wi)
    new = collections.OrderedDict()
    # Shared embeddings.
    new['shared.weight'] = old['token_embedder/embedding']
    # Encoder.
    for i in range(num_layers):
        # Block i, layer 0 (Self Attention).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_attention_layer_norm')
        k, o, q, v = tax_attention_lookup(old, i, 'encoder', 'attention')
        new[f'encoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
        new[f'encoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
        new[f'encoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
        # Block i, layer 1 (MLP).
        layer_norm = tax_layer_norm_lookup(old, i, 'encoder', 'pre_mlp_layer_norm')
        wi, wo = tax_mlp_lookup(old, i, 'encoder', split_mlp_wi)
        new[f'encoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
        if split_mlp_wi:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_0.weight'] = wi[0].T
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi_1.weight'] = wi[1].T
        else:
            new[f'encoder.block.{i}.layer.1.DenseReluDense.wi.weight'] = wi.T
        new[f'encoder.block.{i}.layer.1.DenseReluDense.wo.weight'] = wo.T
    new['encoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
        'encoder/relpos_bias/rel_embedding'
    ].T
    new['encoder.final_layer_norm.weight'] = old['encoder/encoder_norm/scale']
    if not is_encoder_only:
        # Decoder.
        for i in range(num_layers):
            # Block i, layer 0 (Self Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_self_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'self_attention')
            new[f'decoder.block.{i}.layer.0.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.0.SelfAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.0.SelfAttention.v.weight'] = v.T
            # Block i, layer 1 (Cross Attention).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_cross_attention_layer_norm')
            k, o, q, v = tax_attention_lookup(old, i, 'decoder', 'encoder_decoder_attention')
            new[f'decoder.block.{i}.layer.1.layer_norm.weight'] = layer_norm
            new[f'decoder.block.{i}.layer.1.EncDecAttention.k.weight'] = k.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.o.weight'] = o.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.q.weight'] = q.T
            new[f'decoder.block.{i}.layer.1.EncDecAttention.v.weight'] = v.T
            # Block i, layer 2 (MLP).
            layer_norm = tax_layer_norm_lookup(old, i, 'decoder', 'pre_mlp_layer_norm')
            wi, wo = tax_mlp_lookup(old, i, 'decoder', split_mlp_wi)
            new[f'decoder.block.{i}.layer.2.layer_norm.weight'] = layer_norm
            if split_mlp_wi:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_0.weight'] = wi[0].T
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi_1.weight'] = wi[1].T
            else:
                new[f'decoder.block.{i}.layer.2.DenseReluDense.wi.weight'] = wi.T
            new[f'decoder.block.{i}.layer.2.DenseReluDense.wo.weight'] = wo.T
        new['decoder.final_layer_norm.weight'] = old['decoder/decoder_norm/scale']
        new['decoder.block.0.layer.0.SelfAttention.relative_attention_bias.weight'] = old[
            'decoder/relpos_bias/rel_embedding'
        ].T
        # LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
        if "decoder/logits_dense/kernel" in old:
            new['lm_head.weight'] = old['decoder/logits_dense/kernel'].T
    return new
def make_state_dict(converted_params, is_encoder_only):
    """Build a torch state dict from converted numpy parameters and fill in
    the weights that T5 ties to the shared embedding.

    Bug fixes: duplicate parameter names (SyntaxError) removed; the dict is
    now actually bound to ``state_dict`` (the original assigned it to a
    throwaway local and then read ``state_dict`` as an undefined name), and
    the tied weights are written into the dict instead of dangling locals.
    """
    state_dict = collections.OrderedDict([(k, torch.from_numpy(v.copy())) for (k, v) in converted_params.items()])
    # Add what is missing.
    if "encoder.embed_tokens.weight" not in state_dict:
        state_dict['encoder.embed_tokens.weight'] = state_dict['shared.weight']
    if not is_encoder_only:
        if "decoder.embed_tokens.weight" not in state_dict:
            state_dict['decoder.embed_tokens.weight'] = state_dict['shared.weight']
        if "lm_head.weight" not in state_dict:  # For old 1.0 models.
            print('Using shared word embeddings as lm_head.')
            state_dict['lm_head.weight'] = state_dict['shared.weight']
    return state_dict
def load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only):
    """Load a T5X checkpoint, convert it, and copy the weights into ``model``.

    Bug fixes: duplicate parameter names (SyntaxError) removed; intermediate
    results are now bound to named locals; name restored to
    ``load_tax_weights_in_ta`` as used by the converter entry point.
    """
    variables = checkpoints.load_tax_checkpoint(tax_checkpoint_path)
    converted = convert_tax_to_pytorch(variables, num_layers=config.num_layers, is_encoder_only=is_encoder_only)
    state_dict = make_state_dict(converted, is_encoder_only)
    # strict only for encoder-only models, mirroring the original call.
    model.load_state_dict(state_dict, strict=is_encoder_only)
def convert_tax_checkpoint_to_pytorch(tax_checkpoint_path, config_file, pytorch_dump_path, is_encoder_only=False):
    """Build a T5 model from ``config_file``, load T5X weights into it, and
    save it as a PyTorch checkpoint under ``pytorch_dump_path``.

    Bug fixes: duplicate parameter names (SyntaxError) removed; the config
    and model are now bound to named locals; name restored per the
    ``__main__`` call site.
    """
    config = TaConfig.from_json_file(config_file)
    print(f'''Building PyTorch model from configuration: {config}''')
    # Non-v1.1 checkpoints could also use T5Model, but this works for all.
    # The v1.0 checkpoints will simply have an LM head that is the word embeddings.
    if is_encoder_only:
        model = TaEncoderModel(config)
    else:
        model = TaForConditionalGeneration(config)
    # Load weights from tf checkpoint
    load_tax_weights_in_ta(model, config, tax_checkpoint_path, is_encoder_only)
    # Save pytorch-model
    print(f'''Save PyTorch model to {pytorch_dump_path}''')
    model.save_pretrained(pytorch_dump_path)
    # Verify that we can load the checkpoint.
    model.from_pretrained(pytorch_dump_path)
    print('Done')
if __name__ == "__main__":
    # CLI entry point for the converter.
    # Bug fixes: the parser and parsed namespace were previously assigned to a
    # throwaway local while later lines read the undefined names ``parser`` /
    # ``args``; the namespace attribute is ``t5x_checkpoint_path`` (derived by
    # argparse from the ``--t5x_checkpoint_path`` option string), not
    # ``tax_checkpoint_path``.
    parser = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
    # Required parameters
    parser.add_argument(
        """--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
    )
    parser.add_argument(
        """--config_file""",
        default=None,
        type=str,
        required=True,
        help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
    )
    parser.add_argument(
        """--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
    )
    parser.add_argument(
        """--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
    )
    args = parser.parse_args()
    convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path, args.config_file, args.pytorch_dump_path, args.is_encoder_only
    )
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def preprocess(image):
    """Convert a PIL image into a normalized NCHW float tensor in [-1, 1],
    after snapping its width/height down to multiples of 32.

    Bug fixes: restored the name ``preprocess`` (called by the pipeline
    below) and re-bound ``image``/``w``/``h``, which the original assigned to
    throwaway locals and then read as undefined names.
    """
    w, h = image.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0
class A__ ( __magic_name__ ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    Bug fixes: both ``__init__`` and ``__call__`` repeated the parameter name
    ``a`` (SyntaxErrors) and bound every intermediate to a throwaway local;
    parameter and local names are restored from the annotations, keyword call
    sites, and free-name reads in the original body.
    """

    def __init__(
        self,
        vqvae: VQModel,
        unet: UNetaDModel,
        scheduler: Union[
            DDIMScheduler,
            PNDMScheduler,
            LMSDiscreteScheduler,
            EulerDiscreteScheduler,
            EulerAncestralDiscreteScheduler,
            DPMSolverMultistepScheduler,
        ],
    ):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        image: Union[torch.Tensor, PIL.Image.Image] = None,
        batch_size: Optional[int] = 1,
        num_inference_steps: Optional[int] = 100,
        eta: Optional[float] = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        """Run the denoising loop on ``image`` and return the upscaled result."""
        if isinstance(image, PIL.Image.Image):
            batch_size = 1
        elif isinstance(image, torch.Tensor):
            batch_size = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(image)}''')
        if isinstance(image, PIL.Image.Image):
            image = preprocess(image)
        height, width = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        latents_shape = (batch_size, self.unet.config.in_channels // 2, height, width)
        latents_dtype = next(self.unet.parameters()).dtype
        latents = randn_tensor(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
        image = image.to(device=self.device, dtype=latents_dtype)
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(num_inference_steps, device=self.device)
        timesteps_tensor = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = 'eta' in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs['eta'] = eta
        for t in self.progress_bar(timesteps_tensor):
            # concat latents and low resolution image in the channel dimension.
            latents_input = torch.cat([latents, image], dim=1)
            latents_input = self.scheduler.scale_model_input(latents_input, t)
            # predict the noise residual
            noise_pred = self.unet(latents_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_kwargs).prev_sample
        # decode the image latents with the VQVAE
        image = self.vqvae.decode(latents).sample
        image = torch.clamp(image, -1.0, 1.0)
        image = image / 2 + 0.5
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 ) -> int:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
    """CNN/DailyMail abstractive-summarization dataset: each item is
    (document_name, story_lines, summary_lines).

    Bug fixes: ``__init__`` repeated the parameter name ``a`` (SyntaxError)
    and bound everything to throwaway locals while later lines read
    ``self.documents`` / ``story_filenames_list`` / ``story_lines`` as
    undefined names; parameter names restored from the free-name reads.
    """

    def __init__(self, path="", prefix="train"):
        # ``prefix`` is accepted for API compatibility; it is not used here.
        assert os.path.isdir(path)
        self.documents = []
        story_filenames_list = os.listdir(path)
        for story_filename in story_filenames_list:
            if "summary" in story_filename:
                continue
            path_to_story = os.path.join(path, story_filename)
            if not os.path.isfile(path_to_story):
                continue
            self.documents.append(path_to_story)

    def __len__(self):
        """Returns the number of documents."""
        return len(self.documents)

    def __getitem__(self, idx):
        document_path = self.documents[idx]
        document_name = document_path.split('/')[-1]
        with open(document_path, encoding='utf-8') as source:
            raw_story = source.read()
        story_lines, summary_lines = process_story(raw_story)
        return document_name, story_lines, summary_lines
def process_story(raw_story):
    """Split a raw CNN/DM story file into (story_lines, summary_lines),
    where summary lines follow ``@highlight`` markers.

    Bug fixes: restored the name ``process_story`` (called by the dataset's
    ``__getitem__``) and re-bound ``nonempty_lines``/``story_lines``/
    ``lines``/``element``, which the original assigned to throwaway locals
    and then read as undefined names.
    """
    nonempty_lines = list(filter(lambda x: len(x) != 0, [line.strip() for line in raw_story.split('\n')]))
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_missing_period(line) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines)
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight'):
                break
            story_lines.append(element)
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('@highlight'), lines))
    return story_lines, summary_lines
def _add_missing_period(line):
    """Append a period to ``line`` unless it already ends with an
    end-of-sentence token or is a ``@highlight`` marker.

    Bug fixes: restored the name ``_add_missing_period`` (called by
    ``process_story``); the token list is now bound to ``END_TOKENS`` which
    the original read as an undefined name. The duplicated ``\\u2019`` entry
    is replaced by ``\\u201d`` (right double quotation mark), matching the
    upstream single/double close-quote pair — TODO confirm against upstream.
    """
    END_TOKENS = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u201d', ')']
    if line.startswith('@highlight'):
        return line
    if line[-1] in END_TOKENS:
        return line
    return line + "."
def lowerCAmelCase__(sequence, block_size, pad_token_id) -> list:
    """Truncate ``sequence`` to ``block_size`` tokens, or right-pad it with
    ``pad_token_id`` up to ``block_size``. Note: padding mutates the input
    list in place (and returns it), while truncation returns a new slice.

    Bug fix: the original signature repeated one parameter name three times
    (a SyntaxError); names restored from the free reads in the body. The
    return annotation was also wrong (a list of ids, not ``str``).
    """
    if len(sequence) > block_size:
        return sequence[:block_size]
    else:
        sequence.extend([pad_token_id] * (block_size - len(sequence)))
        return sequence
def lowerCAmelCase__(sequence, pad_token_id):
    """Return an attention mask for ``sequence``: 1 for real tokens, 0 where
    the token equals ``pad_token_id``.

    Bug fixes: duplicate parameter names (SyntaxError) removed, and the
    zeroing is now written into the mask (the original assigned ``0`` to a
    dangling local, leaving the mask all ones).
    """
    mask = torch.ones_like(sequence)
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCAmelCase__(story_lines, summary_lines, tokenizer):
    """Tokenize story and summary lines and flatten each into a single list
    of token ids: ``(story_token_ids, summary_token_ids)``.

    Bug fix: the original signature repeated one parameter name three times
    (a SyntaxError) and the flattening comprehensions read undefined names;
    names restored from the free reads in the body.
    """
    story_lines_token_ids = [tokenizer.encode(line) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCAmelCase__(batch, separator_token_id):
    """Return alternating segment (token-type) ids for each sequence in
    ``batch``: the segment index parity flips at every separator token.

    Bug fixes: duplicate parameter names (SyntaxError) removed and all
    locals re-bound — the original assigned ``batch_embeddings``,
    ``sentence_num`` and ``embeddings`` to throwaway names and then read
    them as undefined names.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2)
        batch_embeddings.append(embeddings)
    return torch.tensor(batch_embeddings)
import math
def res(x, y) -> float:
    """Return log10(x**y) = y * log10(x), a magnitude proxy used to compare
    two powers without computing them. By convention 0**y -> 0 and x**0 -> 1.

    Bug fixes: the original repeated one parameter name twice (a
    SyntaxError); ``math.logaa`` does not exist — the base-10 logarithm is
    ``math.log10``; the name is restored to ``res`` per the call sites in
    the ``__main__`` block.
    """
    if 0 not in (x, y):
        # We use the relation x^y = y*log10(x), where 10 is the base.
        return y * math.log10(x)
    else:
        if x == 0:  # 0 raised to any number is 0
            return 0
        elif y == 0:
            return 1  # any number raised to 0 is 1
    # Unreachable: one of x, y is 0 and both zero-branches returned above.
    raise AssertionError('This should never happen')
if __name__ == "__main__":  # Main function
    # Read two (base, power) pairs and report which power is larger.
    # Bug fixes: the original bound both input pairs and both results to the
    # same names, so the second read clobbered the first and the comparison
    # `resa > resa` was always False; distinct names restored.
    prompt = """Enter the base and the power separated by a comma: """
    xa, ya = map(int, input(prompt).split(","))
    xb, yb = map(int, input(prompt).split(","))
    # We find the log of each number, using the function res(), which takes two
    # arguments.
    resa = res(xa, ya)
    resb = res(xb, yb)
    # We check for the largest number
    if resa > resb:
        print("""Largest number is""", xa, """^""", ya)
    elif resb > resa:
        print("""Largest number is""", xb, """^""", yb)
    else:
        print("""Both are equal""")
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCamelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def get_checkpoint_from_config_class(config_class):
    """Return the first checkpoint name mentioned in ``config_class``'s
    docstring whose markdown link points at its own Hub page, or None.

    Bug fixes: restored the name used at the call site and re-bound
    ``checkpoint``/``config_source``/``checkpoints``/``ckpt_link``/
    ``ckpt_link_from_name``, which the original assigned to throwaway locals
    and then read as undefined names.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(config_class)
    checkpoints = _re_checkpoint.findall(config_source)
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/'):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        ckpt_link_from_name = f'''https://huggingface.co/{ckpt_name}'''
        if ckpt_link == ckpt_link_from_name:
            checkpoint = ckpt_name
            break
    return checkpoint
def check_config_docstrings_have_checkpoints():
    """Raise ValueError listing all non-deprecated config classes whose
    docstring contains no valid checkpoint link.

    Bug fixes: restored the name used in the ``__main__`` guard and re-bound
    ``configs_without_checkpoint``/``checkpoint``/``name``/``message``, which
    the original assigned to throwaway locals and read as undefined names.
    """
    configs_without_checkpoint = []
    for config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        checkpoint = get_checkpoint_from_config_class(config_class)
        name = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(name)
    if len(configs_without_checkpoint) > 0:
        message = '\n'.join(sorted(configs_without_checkpoint))
        raise ValueError(f'''The following configurations don\'t contain any valid checkpoint:\n{message}''')


if __name__ == "__main__":
    check_config_docstrings_have_checkpoints()
def lowerCAmelCase__(num) -> str:
    """Convert an integer to its binary string representation ('0b...',
    with a '-0b' prefix for negatives).

    Raises:
        TypeError: if ``num`` is a float or a str.

    Bug fixes: the type guards were ``isinstance(x, x)`` — which itself
    raises ``TypeError: isinstance() arg 2 must be a type`` for EVERY input —
    and the locals ``negative``/``binary``/``num`` were assigned to throwaway
    names and then read as undefined; all restored from the free-name reads.
    """
    if isinstance(num, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if isinstance(num, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if num == 0:
        return "0b0"
    negative = False
    if num < 0:
        negative = True
        num = -num
    binary: list[int] = []
    while num > 0:
        binary.insert(0, num % 2)
        num >>= 1
    if negative:
        return "-0b" + "".join(str(e) for e in binary)
    return "0b" + "".join(str(e) for e in binary)


if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
# Lazy-import machinery for the LUKE model package.
# Bug fixes: the import-structure dict was bound to a throwaway name and the
# conditional modeling entries to another, while the final _LazyModule call
# read the undefined name `_import_structure`; the standard HF lazy-module
# pattern is restored (including `sys.modules[__name__] = ...`).
_import_structure = {
    "configuration_luke": ["LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP", "LukeConfig"],
    "tokenization_luke": ["LukeTokenizer"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_luke"] = [
        "LUKE_PRETRAINED_MODEL_ARCHIVE_LIST",
        "LukeForEntityClassification",
        "LukeForEntityPairClassification",
        "LukeForEntitySpanClassification",
        "LukeForMultipleChoice",
        "LukeForQuestionAnswering",
        "LukeForSequenceClassification",
        "LukeForTokenClassification",
        "LukeForMaskedLM",
        "LukeModel",
        "LukePreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
    from .tokenization_luke import LukeTokenizer

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_luke import (
            LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
            LukeForEntityClassification,
            LukeForEntityPairClassification,
            LukeForEntitySpanClassification,
            LukeForMaskedLM,
            LukeForMultipleChoice,
            LukeForQuestionAnswering,
            LukeForSequenceClassification,
            LukeForTokenClassification,
            LukeModel,
            LukePreTrainedModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def dijkstras_two_stack_algorithm(equation: str) -> int:
    """Evaluate a fully-parenthesized single-digit arithmetic expression with
    Dijkstra's two-stack algorithm.

    Bug fixes: restored the name used in the ``__main__`` guard; re-bound the
    operator table and both stacks (the original read them as undefined
    names); and the operator is now applied to the two popped operands —
    the original applied it to the raw equation string twice. The operand
    popped second is the LEFT operand, so non-commutative '-' and '/' are
    handled correctly.
    """
    operators = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
    operand_stack: Stack[int] = Stack()
    operator_stack: Stack[str] = Stack()
    for i in equation:
        if i.isdigit():
            # RULE 1
            operand_stack.push(int(i))
        elif i in operators:
            # RULE 2
            operator_stack.push(i)
        elif i == ")":
            # RULE 4
            opr = operator_stack.peek()
            operator_stack.pop()
            right = operand_stack.peek()
            operand_stack.pop()
            left = operand_stack.peek()
            operand_stack.pop()
            total = operators[opr](left, right)
            operand_stack.push(total)
    # RULE 5
    return operand_stack.peek()


if __name__ == "__main__":
    equation = """(5 + ((4 * 2) * (2 + 3)))"""
    # answer = 45
    print(f"""{equation} = {dijkstras_two_stack_algorithm(equation)}""")
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available


# Lazy-import structure: maps each submodule to the public names it defines.
_import_structure = {
    "configuration_chinese_clip": [
        "CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "ChineseCLIPConfig",
        "ChineseCLIPOnnxConfig",
        "ChineseCLIPTextConfig",
        "ChineseCLIPVisionConfig",
    ],
    "processing_chinese_clip": ["ChineseCLIPProcessor"],
}

# Register vision-dependent entries only when the vision deps are installed.
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_chinese_clip"] = ["ChineseCLIPFeatureExtractor"]
    _import_structure["image_processing_chinese_clip"] = ["ChineseCLIPImageProcessor"]

# Register torch-dependent modeling entries only when torch is installed.
try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_chinese_clip"] = [
        "CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ChineseCLIPModel",
        "ChineseCLIPPreTrainedModel",
        "ChineseCLIPTextModel",
        "ChineseCLIPVisionModel",
    ]

if TYPE_CHECKING:
    from .configuration_chinese_clip import (
        CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
        ChineseCLIPConfig,
        ChineseCLIPOnnxConfig,
        ChineseCLIPTextConfig,
        ChineseCLIPVisionConfig,
    )
    from .processing_chinese_clip import ChineseCLIPProcessor

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_chinese_clip import (
            CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
            ChineseCLIPModel,
            ChineseCLIPPreTrainedModel,
            ChineseCLIPTextModel,
            ChineseCLIPVisionModel,
        )

else:
    import sys

    # Replace this module with a lazy proxy so submodules import on first use.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from datetime import datetime
import requests
def download_video(url: str) -> bytes:
    """Resolve an Instagram video/IGTV page URL and return the raw video bytes.

    Uses the downloadgram.net helper API to obtain the direct media URL,
    then downloads that media.

    :param url: public Instagram video/IGTV post URL.
    :return: the video file content as bytes.
    """
    base_url = "https://downloadgram.net/wp-json/wppress/video-downloader/video?url="
    # The API returns a JSON list; the first entry's first "src" is the direct media URL.
    video_url = requests.get(base_url + url).json()[0]["urls"][0]["src"]
    return requests.get(video_url).content


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = download_video
if __name__ == "__main__":
    # Prompt for the post URL and save the video as a timestamped .mp4 file.
    # NOTE(review): `url`, `file_name` and `download_video` are not bound under
    # these names in this (obfuscated) file — verify.
    lowerCamelCase__ = input("""Enter Video/IGTV url: """).strip()
    lowerCamelCase__ = F"""{datetime.now():%Y-%m-%d_%H:%M:%S}.mp4"""
    with open(file_name, """wb""") as fp:
        fp.write(download_video(url))
    print(F"""Done. Video saved to disk as {file_name}.""")
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module-level logger for progress messages (used by main()).
logger = logging.getLogger(__name__)

# This script only runs inference; gradients are never needed.
torch.set_grad_enabled(False)

# Run the DPR encoder on GPU when one is available.
device = "cuda" if torch.cuda.is_available() else "cpu"
def split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into chunks of at most ``n`` words.

    :param text: input text, tokenized by splitting on ``character``.
    :param n: maximum number of words per chunk.
    :param character: separator used both to split and to re-join words.
    :return: list of stripped chunks of up to ``n`` words each.
    """
    text = text.split(character)
    return [character.join(text[i : i + n]).strip() for i in range(0, len(text), n)]


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = split_text
def split_documents(documents: dict) -> dict:
    """Split each document of a batch into passages of (at most) 100 words.

    :param documents: batch dict with parallel "title" and "text" lists.
    :return: dict with one (title, passage) pair per generated passage;
        entries whose text is None are skipped.
    """
    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in split_text(text):
                # Repeat the (possibly empty) title for every passage of the document.
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = split_documents
def embed(documents: dict, ctx_encoder, ctx_tokenizer) -> dict:
    """Compute DPR embeddings for a batch of (title, text) passages.

    :param documents: batch dict with parallel "title" and "text" lists.
    :param ctx_encoder: DPR context encoder whose output exposes ``pooler_output``.
    :param ctx_tokenizer: matching fast tokenizer.
    :return: dict with an "embeddings" key holding a float numpy array.
    """
    # Computed locally so this function does not depend on module globals.
    device = "cuda" if torch.cuda.is_available() else "cpu"
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    embeddings = ctx_encoder(input_ids.to(device=device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = embed
def main(
    rag_example_args: "RagExampleArguments",
    processing_args: "ProcessingArguments",
    index_hnsw_args: "IndexHnswArguments",
) -> None:
    """Build and index a DPR knowledge dataset for RAG.

    Step 1 creates a dataset with columns title/text/embeddings from a
    tab-separated csv file; step 2 builds an HNSW Faiss index over the
    embeddings. Both artifacts are written to ``rag_example_args.output_dir``.
    """
    ######################################
    logger.info("Step 1 - Create the dataset")
    ######################################

    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage

    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        "csv", data_files=[rag_example_args.csv_path], split="train", delimiter="\t", column_names=["title", "text"]
    )
    # More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files

    # Then split the documents into passages of 100 words
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {"text": Value("string"), "title": Value("string"), "embeddings": Sequence(Value("float32"))}
    )  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset")
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info("Step 2 - Index the dataset")
    ######################################

    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index("embeddings", custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, "my_knowledge_dataset_hnsw_index.faiss")
    dataset.get_index("embeddings").save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class RagExampleArguments:
    """Arguments for building (and later querying) the RAG knowledge dataset."""

    csv_path: str = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb" / "my_knowledge_dataset.csv"),
        metadata={"help": "Path to a tab-separated csv file with columns 'title' and 'text'"},
    )
    question: Optional[str] = field(
        default=None,
        metadata={"help": "Question that is passed as input to RAG. Default is 'What does Moses' rod turn into ?'."},
    )
    rag_model_name: str = field(
        default="facebook/rag-sequence-nq",
        metadata={"help": "The RAG model to use. Either 'facebook/rag-sequence-nq' or 'facebook/rag-token-nq'"},
    )
    dpr_ctx_encoder_model_name: str = field(
        default="facebook/dpr-ctx_encoder-multiset-base",
        metadata={
            "help": (
                "The DPR context encoder model to use. Either 'facebook/dpr-ctx_encoder-single-nq-base' or"
                " 'facebook/dpr-ctx_encoder-multiset-base'"
            )
        },
    )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / "test_run" / "dummy-kb"),
        metadata={"help": "Path to a directory where the dataset passages and the index will be saved"},
    )
@dataclass
class ProcessingArguments:
    """Arguments controlling preprocessing/embedding throughput."""

    num_proc: Optional[int] = field(
        default=None,
        metadata={
            "help": "The number of processes to use to split the documents into passages. Default is single process."
        },
    )
    batch_size: int = field(
        default=16,
        metadata={
            "help": "The batch size to use when computing the passages embeddings using the DPR context encoder."
        },
    )
@dataclass
class IndexHnswArguments:
    """Arguments for the Faiss HNSW index built over the passage embeddings."""

    d: int = field(
        default=768,
        metadata={"help": "The dimension of the embeddings to pass to the HNSW Faiss index."},
    )
    m: int = field(
        default=128,
        metadata={
            "help": (
                "The number of bi-directional links created for every new element during the HNSW index construction."
            )
        },
    )
if __name__ == "__main__":
    logging.basicConfig(level=logging.WARNING)
    # Make this script's own messages visible even with WARNING as root level.
    logging.getLogger(__name__).setLevel(logging.INFO)

    # Parse the three argument groups from the command line.
    parser = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    rag_example_args, processing_args, index_hnsw_args = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Fall back to a temporary directory when no output dir is given.
        rag_example_args.output_dir = rag_example_args.output_dir or tmp_dir
        main(rag_example_args, processing_args, index_hnsw_args)
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def get_some_linear_layer(model):
    """Return a representative linear layer from the model's first transformer block.

    GPT-2 style models expose it as ``mlp.c_fc``; otherwise the BLOOM-style
    ``mlp.dense_ah_to_h`` attribute is used.
    NOTE(review): upstream BLOOM names this layer ``dense_4h_to_h``;
    "dense_ah_to_h" is kept for consistency with this file — verify.
    """
    if model.config.model_type == "gpt2":
        return model.transformer.h[0].mlp.c_fc
    return model.transformer.h[0].mlp.dense_ah_to_h


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = get_some_linear_layer
# Import torch/nn only when available so the module imports without torch.
if is_torch_available():
    import torch
    import torch.nn as nn
class LoRALayer(nn.Module):
    """Wraps a linear module and adds a trainable low-rank adapter to it.

    The adapter is a rank-``rank`` bottleneck (in_features -> rank ->
    out_features); its output is added to the wrapped module's output.
    The up-projection is zero-initialized so the adapter starts as a no-op.
    """

    def __init__(self, module: nn.Module, rank: int):
        super().__init__()
        self.module = module
        self.adapter = nn.Sequential(
            nn.Linear(module.in_features, rank, bias=False),
            nn.Linear(rank, module.out_features, bias=False),
        )
        small_std = (2.0 / (5 * min(module.in_features, module.out_features))) ** 0.5
        nn.init.normal_(self.adapter[0].weight, std=small_std)
        nn.init.zeros_(self.adapter[1].weight)
        # Keep the adapter on the same device as the wrapped weights.
        self.adapter.to(module.weight.device)

    def forward(self, input, *args, **kwargs):
        """Wrapped module output plus adapter output."""
        return self.module(input, *args, **kwargs) + self.adapter(input)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Base4bitTest(unittest.TestCase):
    """Shared constants and fixtures for the 4-bit quantization tests.

    We need relatively large models (>1b parameters), otherwise quantization
    may not work as expected — hence bloom-1b7.
    """

    model_name = "bigscience/bloom-1b7"

    # Expected (non-quantized / quantized) memory footprint ratio.
    EXPECTED_RELATIVE_DIFFERENCE = 2.109659552692574

    input_text = "Hello my name is"
    EXPECTED_OUTPUTS = set()
    EXPECTED_OUTPUTS.add("Hello my name is John and I am a professional photographer. I")
    EXPECTED_OUTPUTS.add("Hello my name is John.\nI am a friend of your father.\n")
    EXPECTED_OUTPUTS.add("Hello my name is John Doe, I am a student at the University")
    MAX_NEW_TOKENS = 10

    def setUp(self):
        """Load the tokenizer once per test."""
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
class Bnb4BitTest(Base4bitTest):
    """Core 4-bit loading/inference tests for a causal LM.

    NOTE(review): identifiers such as ``load_in_abit``/``floataa``/``Paramsabit``
    appear to be mangled forms of the real transformers/bitsandbytes names
    (``load_in_4bit``/``float16``/``Params4bit``) — verify against the
    installed library versions.
    """

    def setUp(self):
        super().setUp()

        # Models and tokenizer
        self.model_fpaa = AutoModelForCausalLM.from_pretrained(
            self.model_name, torch_dtype=torch.floataa, device_map="auto"
        )
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

    def tearDown(self):
        """Free the GPU memory between tests."""
        del self.model_fpaa
        del self.model_abit

        gc.collect()
        torch.cuda.empty_cache()

    def test_quantization_config_json_serialization(self):
        """The quantized model's config exposes a serializable quantization_config."""
        config = self.model_abit.config

        self.assertTrue(hasattr(config, "quantization_config"))

        _ = config.to_dict()
        _ = config.to_diff_dict()
        _ = config.to_json_string()

    def test_memory_footprint(self):
        """4-bit weights shrink the memory footprint by the expected ratio."""
        from bitsandbytes.nn import Paramsabit

        mem_fpaa = self.model_fpaa.get_memory_footprint()
        mem_abit = self.model_abit.get_memory_footprint()

        self.assertAlmostEqual(mem_fpaa / mem_abit, self.EXPECTED_RELATIVE_DIFFERENCE)
        linear = get_some_linear_layer(self.model_abit)
        self.assertTrue(linear.weight.__class__ == Paramsabit)

    def test_linear_are_4bit(self):
        """All linear weights (except lm_head / kept-in-fp32 modules) are packed uint8."""
        from transformers import TaPreTrainedModel

        self.model_fpaa.get_memory_footprint()
        self.model_abit.get_memory_footprint()

        for name, module in self.model_abit.named_modules():
            if isinstance(module, torch.nn.Linear):
                if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
                    # 4-bit parameters are packed in uint8 variables
                    self.assertTrue(module.weight.dtype == torch.uinta)

    def test_generate_quality(self):
        """Generation from the 4-bit model matches one of the expected outputs."""
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_generate_quality_config(self):
        """Loading through an explicit BitsAndBytesConfig behaves the same."""
        bnb_config = BitsAndBytesConfig()
        bnb_config.load_in_abit = True

        model_abit_from_config = AutoModelForCausalLM.from_pretrained(
            self.model_name, quantization_config=bnb_config, device_map="auto"
        )

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        output_sequences = model_abit_from_config.generate(
            input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10
        )

        self.assertIn(self.tokenizer.decode(output_sequences[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)

    def test_raise_on_save_pretrained(self):
        """Saving a 4-bit model is not supported and must raise."""
        with self.assertRaises(NotImplementedError), tempfile.TemporaryDirectory() as tmpdirname:
            self.model_abit.save_pretrained(tmpdirname)

    def test_raise_if_config_and_load_in_4bit(self):
        """Passing both a quantization_config and the load flag must raise."""
        bnb_config = BitsAndBytesConfig()
        with self.assertRaises(ValueError):
            _ = AutoModelForCausalLM.from_pretrained(
                self.model_name,
                quantization_config=bnb_config,
                load_in_abit=True,
                device_map="auto",
                bnb_abit_quant_type="nf4",
            )

    def test_device_and_dtype_assignment(self):
        """Casting/moving the 4-bit model is forbidden; the fp16 model stays usable."""
        with self.assertRaises(ValueError):
            # Tries with `str`
            self.model_abit.to("cpu")

        with self.assertRaises(ValueError):
            # Tries with a `dtype``
            self.model_abit.to(torch.floataa)

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.to(torch.device("cuda:0"))

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.float()

        with self.assertRaises(ValueError):
            # Tries with a `device`
            self.model_abit.half()

        # Test if we did not break anything
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")
        self.model_fpaa = self.model_fpaa.to(torch.floataa)
        _ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)

        # Check this does not throw an error
        _ = self.model_fpaa.to("cpu")

        # Check this does not throw an error
        _ = self.model_fpaa.half()

        # Check this does not throw an error
        _ = self.model_fpaa.float()

    def test_fp32_4bit_conversion(self):
        """Modules configured to stay in fp32 keep a float dtype after 4-bit loading."""
        model = AutoModelForSeqaSeqLM.from_pretrained("t5-small", load_in_abit=True, device_map="auto")
        self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa)
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class Bnb4BitT5Test(unittest.TestCase):
    """4-bit loading tests for T5-style (encoder-decoder) models."""

    @classmethod
    def setUpClass(cls):
        cls.model_name = "t5-small"
        cls.dense_act_model_name = "google/flan-t5-small"  # flan-t5 uses dense-act instead of dense-relu-dense
        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name)
        cls.input_text = "Translate in German: Hello, my dog is cute"

    def tearDown(self):
        """Free the GPU memory between tests."""
        gc.collect()
        torch.cuda.empty_cache()

    def test_inference_without_keep_in_fp32(self):
        """Models still load and generate when the keep-in-fp32 list is disabled."""
        from transformers import TaForConditionalGeneration

        modules = TaForConditionalGeneration._keep_in_fpaa_modules
        TaForConditionalGeneration._keep_in_fpaa_modules = None

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
        # Restore the class-level attribute for subsequent tests.
        TaForConditionalGeneration._keep_in_fpaa_modules = modules

    def test_inference_with_keep_in_fp32(self):
        """With keep-in-fp32 active, attention projections are quantized linear layers."""
        import bitsandbytes as bnb

        from transformers import TaForConditionalGeneration

        # test with `t5-small`
        model = TaForConditionalGeneration.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")

        # there was a bug with decoders - this test checks that it is fixed
        self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q, bnb.nn.Linearabit))

        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)

        # test with `flan-t5-small`
        model = TaForConditionalGeneration.from_pretrained(
            self.dense_act_model_name, load_in_abit=True, device_map="auto"
        )
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt").to(0)
        _ = model.generate(**encoded_input)
class Classes4BitModelTest(Base4bitTest):
    """Check 4-bit loading across different auto-model head classes."""

    def setUp(self):
        super().setUp()
        # model_name
        self.model_name = "bigscience/bloom-560m"
        self.seq_to_seq_name = "t5-small"

        # Different types of model
        self.base_model = AutoModel.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Sequence classification model
        self.sequence_model = AutoModelForSequenceClassification.from_pretrained(
            self.model_name, load_in_abit=True, device_map="auto"
        )
        # CausalLM model
        self.model_abit = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True, device_map="auto")
        # Seq2seq model
        self.seq_to_seq_model = AutoModelForSeqaSeqLM.from_pretrained(
            self.seq_to_seq_name, load_in_abit=True, device_map="auto"
        )

    def tearDown(self):
        """Free the GPU memory between tests."""
        del self.base_model
        del self.sequence_model
        del self.model_abit
        del self.seq_to_seq_model

        gc.collect()
        torch.cuda.empty_cache()

    def test_correct_head_class(self):
        """Only the backbone's last linear is quantized; task heads stay nn.Parameter."""
        from bitsandbytes.nn import Paramsabit

        self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit)

        # Other heads should be nn.Parameter
        self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter)
        self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter)
class Pipeline4BitTest(Base4bitTest):
    """4-bit loading through the high-level `pipeline` API."""

    def setUp(self):
        super().setUp()

    def tearDown(self):
        """Free the GPU memory between tests."""
        del self.pipe

        gc.collect()
        torch.cuda.empty_cache()

    def test_pipeline(self):
        """A 4-bit pipeline generates one of the expected continuations."""
        self.pipe = pipeline(
            "text-generation",
            model=self.model_name,
            model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa},
            max_new_tokens=self.MAX_NEW_TOKENS,
        )

        # Real second forward pass
        pipeline_output = self.pipe(self.input_text)
        self.assertIn(pipeline_output[0]["generated_text"], self.EXPECTED_OUTPUTS)
@require_torch_multi_gpu
class Bnb4bitTestMultiGpu(Base4bitTest):
    """4-bit loading balanced across multiple GPUs."""

    def setUp(self):
        super().setUp()

    def test_multi_gpu_loading(self):
        """A balanced device map spreads the model over both GPUs and still generates."""
        model_parallel = AutoModelForCausalLM.from_pretrained(
            self.model_name, load_in_abit=True, device_map="balanced"
        )

        # Check correct device map
        self.assertEqual(set(model_parallel.hf_device_map.values()), {0, 1})

        # Check that inference pass works on the model
        encoded_input = self.tokenizer(self.input_text, return_tensors="pt")

        # Second real batch
        output_parallel = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0), max_new_tokens=10)
        self.assertIn(self.tokenizer.decode(output_parallel[0], skip_special_tokens=True), self.EXPECTED_OUTPUTS)
class Bnb4BitTestTraining(Base4bitTest):
    """Train LoRA adapters on top of a frozen 4-bit model."""

    def setUp(self):
        self.model_name = "facebook/opt-350m"
        super().setUp()

    def test_training(self):
        """Adapters receive gradients while the frozen base model does not."""
        if version.parse(importlib.metadata.version("bitsandbytes")) < version.parse("0.37.0"):
            return

        # Step 1: freeze all parameters
        model = AutoModelForCausalLM.from_pretrained(self.model_name, load_in_abit=True)
        self.assertEqual(set(model.hf_device_map.values()), {torch.cuda.current_device()})

        for param in model.parameters():
            param.requires_grad = False  # freeze the model - train adapters later
            if param.ndim == 1:
                # cast the small parameters (e.g. layernorm) to fp32 for stability
                param.data = param.data.to(torch.floataa)

        # Step 2: add adapters
        for _, module in model.named_modules():
            if "OPTAttention" in repr(type(module)):
                module.q_proj = LoRALayer(module.q_proj, rank=16)
                module.k_proj = LoRALayer(module.k_proj, rank=16)
                module.v_proj = LoRALayer(module.v_proj, rank=16)

        # Step 3: dummy batch
        batch = self.tokenizer("Test batch ", return_tensors="pt").to(0)

        # Step 4: Check if the gradient is not None
        with torch.cuda.amp.autocast():
            out = model.forward(**batch)
            out.logits.norm().backward()

        for module in model.modules():
            if isinstance(module, LoRALayer):
                self.assertTrue(module.adapter[1].weight.grad is not None)
                self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0)
            elif isinstance(module, nn.Embedding):
                self.assertTrue(module.weight.grad is None)
class Bnb4BitGPT2Test(Bnb4BitTest):
    """Re-run the core 4-bit tests against a GPT-2 architecture."""

    model_name = "gpt2-xl"
    EXPECTED_RELATIVE_DIFFERENCE = 3.3191854854152187
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class DDPMParallelSchedulerTest(SchedulerCommonTest):
    """Unit tests for DDPMParallelScheduler: configs, variance, and stepping."""

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, overridable via kwargs."""
        config = {
            "num_train_timesteps": 1000,
            "beta_start": 0.0001,
            "beta_end": 0.02,
            "beta_schedule": "linear",
            "variance_type": "fixed_small",
            "clip_sample": True,
        }

        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1], [0.002, 0.02, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True,
                    prediction_type=prediction_type,
                    sample_max_value=threshold,
                )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """Variance values at the start, middle, and end of the schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.00979)) < 1e-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.02)) < 1e-5

    def test_batch_step_no_noise(self):
        """batch_step_no_noise over three stacked samples matches reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample1 = self.dummy_sample_deter
        sample2 = self.dummy_sample_deter + 0.1
        sample3 = self.dummy_sample_deter - 0.1

        per_sample_batch = sample1.shape[0]
        samples = torch.stack([sample1, sample2, sample3], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1153.1833) < 1e-2
        assert abs(result_mean.item() - 0.5005) < 1e-3

    def test_full_loop_no_noise(self):
        """A full reverse-diffusion loop reproduces reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 258.9606) < 1e-2
        assert abs(result_mean.item() - 0.3372) < 1e-3

    def test_full_loop_with_v_prediction(self):
        """Same full loop, with the v-prediction parameterization."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type="v_prediction")
        scheduler = scheduler_class(**scheduler_config)

        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)

            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 202.0296) < 1e-2
        assert abs(result_mean.item() - 0.2631) < 1e-3

    def test_custom_timesteps(self):
        """previous_timestep follows the custom descending timestep list."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]

        scheduler.set_timesteps(timesteps=timesteps)

        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(scheduler_timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]

            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()

            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 51, 0]

        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}',
        ):
            scheduler.set_timesteps(timesteps=timesteps)
import functools
from typing import Any
def word_break(string: str, words: list[str]) -> bool:
    """Return True if ``string`` can be segmented into words from ``words``.

    Builds a trie over ``words``, then memoizes "is the suffix starting at
    index i breakable?" with dynamic programming.

    :param string: non-empty string to segment.
    :param words: list of non-empty candidate words (reusable).
    :raises ValueError: if ``string`` is empty / not a string, or ``words``
        is not a list of non-empty strings.
    """
    # Validation
    if not isinstance(string, str) or len(string) == 0:
        raise ValueError('the string should be not empty string')

    if not isinstance(words, list) or not all(
        isinstance(item, str) and len(item) > 0 for item in words
    ):
        raise ValueError('the words should be a list of non-empty strings')

    # Build trie
    trie: dict[str, Any] = {}
    word_keeper_key = 'WORD_KEEPER'

    for word in words:
        trie_node = trie
        for c in word:
            if c not in trie_node:
                trie_node[c] = {}

            trie_node = trie_node[c]

        # Marker: a complete word ends at this trie node.
        trie_node[word_keeper_key] = True

    len_string = len(string)

    # Dynamic programming method
    @functools.cache
    def is_breakable(index: int) -> bool:
        """True iff string[index:] can be segmented into trie words."""
        if index == len_string:
            return True

        trie_node = trie
        for i in range(index, len_string):
            trie_node = trie_node.get(string[i], None)

            if trie_node is None:
                return False

            if trie_node.get(word_keeper_key, False) and is_breakable(i + 1):
                return True

        return False

    return is_breakable(0)


# Backward-compatible alias for the old (obfuscated) name.
lowerCAmelCase__ = word_break
if __name__ == "__main__":
    # Run any doctests defined in this module when executed as a script.
    import doctest
    doctest.testmod()
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv3ImageProcessor'
lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Optional[int] , a : Union[str, Any]=None , a : Optional[Any]=None , **a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowerCAmelCase__ : int = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self : List[Any] , a : List[Any] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : str , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
lowerCAmelCase__ : List[str] = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ : List[str] = features['words']
lowerCAmelCase__ : List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
lowerCAmelCase__ : Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase__ : List[str] = images
return encoded_inputs
def _lowerCamelCase ( self : Any , a : List[str] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(a )} and {len(a )}''' )
return images_with_overflow
def _lowerCamelCase ( self : Union[str, Any] , *a : Optional[Any] , **a : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : Tuple , *a : List[str] , **a : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor | 69 | 1 |
from collections import OrderedDict
from typing import List, Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""google/efficientnet-b7""": """https://huggingface.co/google/efficientnet-b7/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
lowercase = 'efficientnet'
def __init__( self : Dict , a : int = 3 , a : int = 600 , a : float = 2.0 , a : float = 3.1 , a : int = 8 , a : List[int] = [3, 3, 5, 3, 5, 5, 3] , a : List[int] = [32, 16, 24, 40, 80, 112, 192] , a : List[int] = [16, 24, 40, 80, 112, 192, 320] , a : List[int] = [] , a : List[int] = [1, 2, 2, 2, 1, 2, 1] , a : List[int] = [1, 2, 2, 3, 3, 4, 1] , a : List[int] = [1, 6, 6, 6, 6, 6, 6] , a : float = 0.2_5 , a : str = "swish" , a : int = 2_560 , a : str = "mean" , a : float = 0.0_2 , a : float = 0.0_0_1 , a : float = 0.9_9 , a : float = 0.5 , a : float = 0.2 , **a : str , ):
'''simple docstring'''
super().__init__(**a )
lowerCAmelCase__ : int = num_channels
lowerCAmelCase__ : Any = image_size
lowerCAmelCase__ : List[Any] = width_coefficient
lowerCAmelCase__ : Tuple = depth_coefficient
lowerCAmelCase__ : List[str] = depth_divisor
lowerCAmelCase__ : str = kernel_sizes
lowerCAmelCase__ : Union[str, Any] = in_channels
lowerCAmelCase__ : Tuple = out_channels
lowerCAmelCase__ : Dict = depthwise_padding
lowerCAmelCase__ : Optional[Any] = strides
lowerCAmelCase__ : Dict = num_block_repeats
lowerCAmelCase__ : List[str] = expand_ratios
lowerCAmelCase__ : int = squeeze_expansion_ratio
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : List[str] = hidden_dim
lowerCAmelCase__ : Optional[Any] = pooling_type
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : Tuple = batch_norm_eps
lowerCAmelCase__ : str = batch_norm_momentum
lowerCAmelCase__ : Any = dropout_rate
lowerCAmelCase__ : List[str] = drop_connect_rate
lowerCAmelCase__ : Any = sum(a ) * 4
class A__ ( __magic_name__ ):
lowercase = version.parse('1.11' )
@property
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return 1E-5 | 69 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __magic_name__ ):
def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Dict = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Any = q_groups
lowerCAmelCase__ : Any = k_groups
lowerCAmelCase__ : Union[str, Any] = v_groups
lowerCAmelCase__ : int = post_attention_groups
lowerCAmelCase__ : str = intermediate_groups
lowerCAmelCase__ : Union[str, Any] = output_groups
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Any , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowerCAmelCase__ : Any = model(a )[0]
lowerCAmelCase__ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(a , a , atol=1E-4 ) ) | 69 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = RemBertTokenizer
def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[Any] = keep_accents
lowerCAmelCase__ : Optional[Any] = vocab_file
lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 |
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCAmelCase__ : Stack[int] = Stack()
lowerCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE_ )
elif i == ")":
# RULE 4
lowerCAmelCase__ : List[Any] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : List[Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
operand_stack.push(SCREAMING_SNAKE_CASE_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 69 | 1 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : List[Any] , a : Union[str, Any] , a : Dict=False , a : Optional[Any]=True , a : Optional[Any]=True , a : Optional[int]="[CLS]" , a : Optional[Any]="[SEP]" , a : Dict="[UNK]" , a : Optional[int]="[SEP]" , a : Union[str, Any]="[PAD]" , a : List[Any]="[CLS]" , a : List[str]="[MASK]" , **a : Dict , ):
'''simple docstring'''
super().__init__(
do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : Dict = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : Any = keep_accents
lowerCAmelCase__ : Optional[int] = vocab_file
lowerCAmelCase__ : Optional[int] = spm.SentencePieceProcessor()
self.sp_model.Load(a )
@property
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
return len(self.sp_model )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = {self.convert_ids_to_tokens(a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.__dict__.copy()
lowerCAmelCase__ : Any = None
return state
def __setstate__( self : List[str] , a : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = d
lowerCAmelCase__ : int = spm.SentencePieceProcessor()
self.sp_model.Load(self.vocab_file )
def _lowerCamelCase ( self : str , a : List[str] , a : int=False ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.sp_model.EncodeAsPieces(a )
return pieces
def _lowerCamelCase ( self : int , a : List[str] ):
'''simple docstring'''
return self.sp_model.PieceToId(a )
def _lowerCamelCase ( self : Union[str, Any] , a : Any ):
'''simple docstring'''
return self.sp_model.IdToPiece(a )
def _lowerCamelCase ( self : Optional[int] , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.sp_model.decode_pieces(a )
return out_string
def _lowerCamelCase ( self : Tuple , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : Any , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : int = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Any , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 |
import numpy
class A__ :
def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : Dict = input_arr
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 69 | 1 |
import itertools
import random
import unittest
import numpy as np
from transformers import WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST, WavaVecaConfig, WavaVecaFeatureExtractor
from transformers.testing_utils import require_torch, slow
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
lowerCamelCase__ = random.Random()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=1.0 , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_=None ) -> str:
if rng is None:
lowerCAmelCase__ : int = global_rng
lowerCAmelCase__ : Tuple = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class A__ ( unittest.TestCase ):
def __init__( self : str , a : str , a : Optional[Any]=7 , a : Optional[int]=400 , a : List[str]=2_000 , a : Optional[int]=1 , a : Any=0.0 , a : int=16_000 , a : Tuple=True , a : Tuple=True , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Any = batch_size
lowerCAmelCase__ : Optional[Any] = min_seq_length
lowerCAmelCase__ : Any = max_seq_length
lowerCAmelCase__ : Optional[Any] = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
lowerCAmelCase__ : int = feature_size
lowerCAmelCase__ : int = padding_value
lowerCAmelCase__ : Tuple = sampling_rate
lowerCAmelCase__ : Optional[int] = return_attention_mask
lowerCAmelCase__ : int = do_normalize
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def _lowerCamelCase ( self : Union[str, Any] , a : Any=False , a : str=False ):
'''simple docstring'''
def _flatten(a : Tuple ):
return list(itertools.chain(*a ) )
if equal_length:
lowerCAmelCase__ : Dict = floats_list((self.batch_size, self.max_seq_length) )
else:
# make sure that inputs increase in size
lowerCAmelCase__ : Any = [
_flatten(floats_list((x, self.feature_size) ) )
for x in range(self.min_seq_length , self.max_seq_length , self.seq_length_diff )
]
if numpify:
lowerCAmelCase__ : Optional[Any] = [np.asarray(a ) for x in speech_inputs]
return speech_inputs
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = WavaVecaFeatureExtractor
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = WavaVecaFeatureExtractionTester(self )
def _lowerCamelCase ( self : str , a : Optional[int] ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(a , axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(a , axis=0 ) - 1 ) < 1E-3 ) )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
lowerCAmelCase__ : Any = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : int = [np.asarray(a ) for speech_input in speech_inputs]
# Test not batched input
lowerCAmelCase__ : str = feat_extract(speech_inputs[0] , return_tensors='np' ).input_values
lowerCAmelCase__ : List[Any] = feat_extract(np_speech_inputs[0] , return_tensors='np' ).input_values
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test batched
lowerCAmelCase__ : Any = feat_extract(a , return_tensors='np' ).input_values
lowerCAmelCase__ : List[str] = feat_extract(a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
lowerCAmelCase__ : Dict = [floats_list((1, x) )[0] for x in (800, 800, 800)]
lowerCAmelCase__ : Dict = np.asarray(a )
lowerCAmelCase__ : Optional[int] = feat_extract(a , return_tensors='np' ).input_values
lowerCAmelCase__ : Dict = feat_extract(a , return_tensors='np' ).input_values
for enc_seq_a, enc_seq_a in zip(a , a ):
self.assertTrue(np.allclose(a , a , atol=1E-3 ) )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : str = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : Optional[Any] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : Any = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase__ : Optional[Any] = [None, 1_600, None]
for max_length, padding in zip(a , a ):
lowerCAmelCase__ : Optional[Any] = feat_extract(a , padding=a , max_length=a , return_tensors='np' )
lowerCAmelCase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self.assertTrue(input_values[0][800:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self.assertTrue(input_values[0][1_000:].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : Union[str, Any] = range(800 , 1_400 , 200 )
lowerCAmelCase__ : Any = [floats_list((1, x) )[0] for x in lengths]
lowerCAmelCase__ : Dict = ['longest', 'max_length', 'do_not_pad']
lowerCAmelCase__ : Optional[int] = [None, 1_600, None]
for max_length, padding in zip(a , a ):
lowerCAmelCase__ : List[str] = feat_extract(a , max_length=a , padding=a )
lowerCAmelCase__ : Optional[Any] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0][:800] )
self._check_zero_mean_unit_variance(input_values[1][:1_000] )
self._check_zero_mean_unit_variance(input_values[2][:1_200] )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : int = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : int = feat_extract(
a , truncation=a , max_length=1_000 , padding='max_length' , return_tensors='np' )
lowerCAmelCase__ : Any = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1] )
self._check_zero_mean_unit_variance(input_values[2] )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : List[Any] = feat_extract(
a , truncation=a , max_length=1_000 , padding='longest' , return_tensors='np' )
lowerCAmelCase__ : str = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertTrue(input_values.shape == (3, 1_000) )
lowerCAmelCase__ : Optional[int] = [floats_list((1, x) )[0] for x in range(800 , 1_400 , 200 )]
lowerCAmelCase__ : Any = feat_extract(
a , truncation=a , max_length=2_000 , padding='longest' , return_tensors='np' )
lowerCAmelCase__ : Optional[int] = processed.input_values
self._check_zero_mean_unit_variance(input_values[0, :800] )
self._check_zero_mean_unit_variance(input_values[1, :1_000] )
self._check_zero_mean_unit_variance(input_values[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertTrue(input_values.shape == (3, 1_200) )
@require_torch
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
import torch
lowerCAmelCase__ : List[Any] = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
lowerCAmelCase__ : str = np.random.rand(100 ).astype(np.floataa )
lowerCAmelCase__ : int = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
lowerCAmelCase__ : List[str] = feature_extractor.pad([{'input_values': inputs}] , return_tensors='np' )
self.assertTrue(np_processed.input_values.dtype == np.floataa )
lowerCAmelCase__ : Dict = feature_extractor.pad([{'input_values': inputs}] , return_tensors='pt' )
self.assertTrue(pt_processed.input_values.dtype == torch.floataa )
@slow
@require_torch
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for model_id in WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST:
lowerCAmelCase__ : int = WavaVecaConfig.from_pretrained(a )
lowerCAmelCase__ : Tuple = WavaVecaFeatureExtractor.from_pretrained(a )
# only "layer" feature extraction norm should make use of
# attention_mask
self.assertEqual(feat_extract.return_attention_mask , config.feat_extract_norm == 'layer' ) | 69 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ):
'''simple docstring'''
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Optional[int] = embed_dim
lowerCAmelCase__ : Tuple = depths
lowerCAmelCase__ : List[str] = num_heads
lowerCAmelCase__ : List[Any] = window_size
lowerCAmelCase__ : Any = mlp_ratio
lowerCAmelCase__ : Optional[Any] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : int = drop_path_rate
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : int = use_absolute_embeddings
lowerCAmelCase__ : List[str] = patch_norm
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : List[Any] = scope
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = encoder_stride
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a )
lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.type_sequence_label_size
lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
lowerCAmelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(a )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = False
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Dict = outputs.attentions
lowerCAmelCase__ : Dict = len(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[int] = config.window_size**2
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : Tuple = len(a )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : Any = 2
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowerCAmelCase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swinv2 has a different seq_length
lowerCAmelCase__ : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
lowerCAmelCase__ : List[str] = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a )
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) | 69 | 1 |
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
from shutil import copyfile
from huggingface_hub import HfFolder, Repository, create_repo, delete_repo
from requests.exceptions import HTTPError
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
PROCESSOR_MAPPING,
TOKENIZER_MAPPING,
AutoConfig,
AutoFeatureExtractor,
AutoProcessor,
AutoTokenizer,
BertTokenizer,
ProcessorMixin,
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
)
from transformers.testing_utils import TOKEN, USER, get_tests_dir, is_staging_test
from transformers.tokenization_utils import TOKENIZER_CONFIG_FILE
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_tokenizers_available
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
from test_module.custom_processing import CustomProcessor # noqa E402
from test_module.custom_tokenization import CustomTokenizer # noqa E402
# Fixture paths used by the tests below.
# NOTE(review): all three assignments rebind the SAME module-level name, so
# after import only the last value (the "fixtures" directory) is reachable and
# the two file paths are lost. Presumably these were three distinct constants
# (processor config json, vocab json, fixtures dir) — confirm against callers.
lowerCamelCase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")  # overwritten below
lowerCamelCase__ = get_tests_dir("""fixtures/vocab.json""")  # overwritten below
lowerCamelCase__ = get_tests_dir("""fixtures""")  # the only value that survives
class A__ ( unittest.TestCase ):
    # Minimal vocabulary written to disk when a tokenizer fixture is needed.
    lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
    def _lowerCamelCase ( self : int ):
        '''Per-test setup hook.

        NOTE(review): the value 0 is bound to a local that is never read, so
        this method currently has no observable effect — presumably it was
        meant to zero a module-level timeout/flag; confirm original intent.
        '''
        lowerCAmelCase__ : Union[str, Any] = 0
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[Any] = WavaVecaConfig()
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('facebook/wav2vec2-base-960h' )
# save in new folder
model_config.save_pretrained(a )
processor.save_pretrained(a )
lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Load a processor from a directory populated only with copied
        fixture files (no model weights).

        NOTE(review): every ``a`` below is an unresolved name — the original
        arguments (fixture source paths, the temp-dir name, the expected
        class) were lost in a refactor, so this test raises NameError as
        written; recover them before relying on it.
        '''
        with tempfile.TemporaryDirectory() as tmpdirname:
            # copy relevant files
            # presumably: (processor-config fixture, tmpdirname, FEATURE_EXTRACTOR_NAME) — TODO confirm
            copyfile(a , os.path.join(a , a ) )
            # presumably: (vocab fixture, tmpdirname, 'vocab.json') — TODO confirm
            copyfile(a , os.path.join(a , 'vocab.json' ) )
            lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
            self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Dict = WavaVecaFeatureExtractor()
lowerCAmelCase__ : List[Any] = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCAmelCase__ : Dict = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in tokenizer
with open(os.path.join(a , a ) , 'r' ) as f:
lowerCAmelCase__ : List[str] = json.load(a )
config_dict.pop('processor_class' )
with open(os.path.join(a , a ) , 'w' ) as f:
f.write(json.dumps(a ) )
lowerCAmelCase__ : int = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
def _lowerCamelCase ( self : str ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdirname:
lowerCAmelCase__ : Optional[Any] = WavaVecaFeatureExtractor()
lowerCAmelCase__ : Dict = AutoTokenizer.from_pretrained('facebook/wav2vec2-base-960h' )
lowerCAmelCase__ : List[str] = WavaVecaProcessor(a , a )
# save in new folder
processor.save_pretrained(a )
# drop `processor_class` in feature extractor
with open(os.path.join(a , a ) , 'r' ) as f:
lowerCAmelCase__ : Optional[Any] = json.load(a )
config_dict.pop('processor_class' )
with open(os.path.join(a , a ) , 'w' ) as f:
f.write(json.dumps(a ) )
lowerCAmelCase__ : Optional[Any] = AutoProcessor.from_pretrained(a )
self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Resolve a processor purely from a saved config whose
        ``processor_class`` field names ``Wav2Vec2Processor``.

        NOTE(review): ``a`` and ``model_config`` are unresolved names — the
        original arguments (temp-dir name, vocab fixture path, the processor
        file name, the expected class) were lost in a refactor, so this test
        raises NameError as written.
        '''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCAmelCase__ : Any = WavaVecaConfig(processor_class='Wav2Vec2Processor' )
            # NOTE(review): `model_config` is never bound — presumably the assignment above.
            model_config.save_pretrained(a )
            # copy relevant files
            copyfile(a , os.path.join(a , 'vocab.json' ) )
            # create empty sample processor
            with open(os.path.join(a , a ) , 'w' ) as f:
                f.write('{}' )
            lowerCAmelCase__ : Tuple = AutoProcessor.from_pretrained(a )
            self.assertIsInstance(a , a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
with self.assertRaises(a ):
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
# If remote code is disabled, we can't load this config.
with self.assertRaises(a ):
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
self.assertTrue(processor.special_attribute_present )
self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
lowerCAmelCase__ : Dict = processor.feature_extractor
self.assertTrue(feature_extractor.special_attribute_present )
self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
lowerCAmelCase__ : Optional[int] = processor.tokenizer
self.assertTrue(tokenizer.special_attribute_present )
if is_tokenizers_available():
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizerFast' )
# Test we can also load the slow version
lowerCAmelCase__ : Any = AutoProcessor.from_pretrained(
'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a , use_fast=a )
lowerCAmelCase__ : List[str] = new_processor.tokenizer
self.assertTrue(new_tokenizer.special_attribute_present )
self.assertEqual(new_tokenizer.__class__.__name__ , 'NewTokenizer' )
else:
self.assertEqual(tokenizer.__class__.__name__ , 'NewTokenizer' )
    def _lowerCamelCase ( self : List[Any] ):
        '''Register custom config/feature-extractor/tokenizer/processor classes
        with the auto-API, exercise them, and always deregister in ``finally``
        so other tests are unaffected.

        NOTE(review): the registration calls pass the unresolved name ``a``
        instead of the custom classes, ``self.vocab_tokens`` does not exist
        (the class attribute is named ``lowercase``), and ``processor`` is
        never bound — this test fails with NameError/AttributeError as
        written; the original arguments were lost in a refactor.
        '''
        try:
            AutoConfig.register('custom' , a )
            AutoFeatureExtractor.register(a , a )
            AutoTokenizer.register(a , slow_tokenizer_class=a )
            AutoProcessor.register(a , a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(a ):
                AutoProcessor.register(a , a )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCAmelCase__ : Optional[Any] = CustomFeatureExtractor.from_pretrained(a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                lowerCAmelCase__ : List[Any] = os.path.join(a , 'vocab.txt' )
                with open(a , 'w' , encoding='utf-8' ) as vocab_writer:
                    vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
                lowerCAmelCase__ : Tuple = CustomTokenizer(a )
            lowerCAmelCase__ : Dict = CustomProcessor(a , a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                processor.save_pretrained(a )
                lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained(a )
                self.assertIsInstance(a , a )
        finally:
            # Always clean the auto-API registries so a failure above cannot
            # leak the custom registrations into other tests.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
    def _lowerCamelCase ( self : Optional[int] ):
        '''Conflict resolution between locally-registered classes and remote
        code on the Hub: ``trust_remote_code`` decides whether the local or
        the remote ("New*") implementation wins.

        NOTE(review): the three helper classes below all reuse the name
        ``A__`` (so only the last binding survives) and the third repeatedly
        rebinds ``lowercase``; ``__magic_name__`` and ``a`` are unresolved
        names and ``processor`` is never bound — the test fails with
        NameError as written. The original class names, base classes and flag
        attributes were lost in a refactor.
        '''
        class A__ ( __magic_name__ ):
            # presumably a local feature-extractor subclass; flag marks it as local
            lowercase = False
        class A__ ( __magic_name__ ):
            # presumably a local tokenizer subclass; flag marks it as local
            lowercase = False
        class A__ ( __magic_name__ ):
            # presumably a local processor subclass naming its auto classes
            lowercase = 'AutoFeatureExtractor'
            lowercase = 'AutoTokenizer'
            lowercase = False
        try:
            AutoConfig.register('custom' , a )
            AutoFeatureExtractor.register(a , a )
            AutoTokenizer.register(a , slow_tokenizer_class=a )
            AutoProcessor.register(a , a )
            # If remote code is not set, the default is to use local classes.
            lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/test_dynamic_processor' )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote code is disabled, we load the local ones.
            lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertFalse(processor.special_attribute_present )
            self.assertFalse(processor.feature_extractor.special_attribute_present )
            self.assertFalse(processor.tokenizer.special_attribute_present )
            # If remote is enabled, we load from the Hub.
            lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained(
                'hf-internal-testing/test_dynamic_processor' , trust_remote_code=a )
            self.assertEqual(processor.__class__.__name__ , 'NewProcessor' )
            self.assertTrue(processor.special_attribute_present )
            self.assertTrue(processor.feature_extractor.special_attribute_present )
            self.assertTrue(processor.tokenizer.special_attribute_present )
        finally:
            # Always clean the auto-API registries so other tests are unaffected.
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
            if CustomConfig in TOKENIZER_MAPPING._extra_content:
                del TOKENIZER_MAPPING._extra_content[CustomConfig]
            if CustomConfig in PROCESSOR_MAPPING._extra_content:
                del PROCESSOR_MAPPING._extra_content[CustomConfig]
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-bert' )
self.assertEqual(processor.__class__.__name__ , 'BertTokenizerFast' )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = AutoProcessor.from_pretrained('hf-internal-testing/tiny-random-convnext' )
self.assertEqual(processor.__class__.__name__ , 'ConvNextImageProcessor' )
@is_staging_test
class A__ ( unittest.TestCase ):
    '''Staging-Hub integration tests: pushing a Wav2Vec2 processor to a user and an
    org namespace, and pushing/reloading a fully dynamic (custom-code) processor.

    NOTE(review): bare ``a`` arguments and names that are read but never bound
    (``processor``, ``new_processor``, ``repo``, ``tokenizer_config``) are
    artifacts of an automated rewrite -- confirm against the upstream test.
    '''
    # Shared toy vocab used to build the custom tokenizer pushed to the Hub.
    lowercase = ['[UNK]', '[CLS]', '[SEP]', '[PAD]', '[MASK]', 'bla', 'blou']
    @classmethod
    def _lowerCamelCase ( cls : Tuple ):
        '''Log in to the staging Hub with the shared test token.'''
        lowerCAmelCase__ : List[str] = TOKEN
        HfFolder.save_token(a )
    @classmethod
    def _lowerCamelCase ( cls : str ):
        '''Best-effort deletion of every repo this suite may have created.'''
        try:
            delete_repo(token=cls._token , repo_id='test-processor' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='valid_org/test-processor-org' )
        except HTTPError:
            pass
        try:
            delete_repo(token=cls._token , repo_id='test-dynamic-processor' )
        except HTTPError:
            pass
    def _lowerCamelCase ( self : Optional[int] ):
        '''Push a processor to the user namespace and verify the reload matches.'''
        lowerCAmelCase__ : Union[str, Any] = WavaVecaProcessor.from_pretrained(a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(a , 'test-processor' ) , push_to_hub=a , use_auth_token=self._token )
            lowerCAmelCase__ : int = WavaVecaProcessor.from_pretrained(f'''{USER}/test-processor''' )
            # Feature-extractor attributes and tokenizer vocab must survive round trip.
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def _lowerCamelCase ( self : Optional[int] ):
        '''Same as above but pushing into an organization namespace.'''
        lowerCAmelCase__ : Optional[Any] = WavaVecaProcessor.from_pretrained(a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            processor.save_pretrained(
                os.path.join(a , 'test-processor-org' ) , push_to_hub=a , use_auth_token=self._token , organization='valid_org' , )
            lowerCAmelCase__ : List[str] = WavaVecaProcessor.from_pretrained('valid_org/test-processor-org' )
            for k, v in processor.feature_extractor.__dict__.items():
                self.assertEqual(a , getattr(new_processor.feature_extractor , a ) )
            self.assertDictEqual(new_processor.tokenizer.get_vocab() , processor.tokenizer.get_vocab() )
    def _lowerCamelCase ( self : List[str] ):
        '''Push a processor whose classes live in custom code files; reload it with
        trust_remote_code and check the auto_map metadata was written.'''
        CustomFeatureExtractor.register_for_auto_class()
        CustomTokenizer.register_for_auto_class()
        CustomProcessor.register_for_auto_class()
        lowerCAmelCase__ : Union[str, Any] = CustomFeatureExtractor.from_pretrained(a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            lowerCAmelCase__ : Optional[int] = os.path.join(a , 'vocab.txt' )
            with open(a , 'w' , encoding='utf-8' ) as vocab_writer:
                vocab_writer.write(''.join([x + '\n' for x in self.vocab_tokens] ) )
            lowerCAmelCase__ : Any = CustomTokenizer(a )
        lowerCAmelCase__ : Tuple = CustomProcessor(a , a )
        with tempfile.TemporaryDirectory() as tmp_dir:
            create_repo(f'''{USER}/test-dynamic-processor''' , token=self._token )
            lowerCAmelCase__ : str = Repository(a , clone_from=f'''{USER}/test-dynamic-processor''' , token=self._token )
            processor.save_pretrained(a )
            # This has added the proper auto_map field to the feature extractor config
            self.assertDictEqual(
                processor.feature_extractor.auto_map , {
                    'AutoFeatureExtractor': 'custom_feature_extraction.CustomFeatureExtractor',
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )
            # This has added the proper auto_map field to the tokenizer config
            with open(os.path.join(a , 'tokenizer_config.json' ) ) as f:
                lowerCAmelCase__ : List[str] = json.load(a )
            self.assertDictEqual(
                tokenizer_config['auto_map'] , {
                    'AutoTokenizer': ['custom_tokenization.CustomTokenizer', None],
                    'AutoProcessor': 'custom_processing.CustomProcessor',
                } , )
            # The code has been copied from fixtures
            self.assertTrue(os.path.isfile(os.path.join(a , 'custom_feature_extraction.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(a , 'custom_tokenization.py' ) ) )
            self.assertTrue(os.path.isfile(os.path.join(a , 'custom_processing.py' ) ) )
            repo.push_to_hub()
            lowerCAmelCase__ : int = AutoProcessor.from_pretrained(f'''{USER}/test-dynamic-processor''' , trust_remote_code=a )
            # Can't make an isinstance check because the new_processor is from the CustomProcessor class of a dynamic module
            self.assertEqual(new_processor.__class__.__name__ , 'CustomProcessor' )
from itertools import permutations
def is_substring_divisible(num: tuple) -> bool:
    """Return True if the digit tuple ``num`` has the Project Euler 43
    substring-divisibility property: d2d3d4 % 2 == 0, d3d4d5 % 3 == 0,
    d4d5d6 % 5 == 0 and d5d6d7, d6d7d8, d7d8d9, d8d9d10 are divisible by
    7, 11, 13, 17 respectively.

    Fix: the function and its parameter were mangled (``num`` was undefined in
    the body, and the name must match the call in ``solution``).
    """
    # d2d3d4 divisible by 2  <=>  its last digit d4 is even.
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3  <=>  its digit sum is divisible by 3.
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5  <=>  its last digit d6 is 0 or 5.
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests):
        # Three-digit window d(i+5)d(i+6)d(i+7) must divide by the matching prime.
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution(n: int = 10) -> int:
    """Return the sum of all 0-to-(n-1) pandigital numbers that have the
    substring-divisibility property (Project Euler problem 43).

    Fix: ``map`` lost its first argument in the mangled original -- it must be
    ``str`` so the digit tuple can be joined into a number.
    """
    return sum(
        int("".join(map(str, num)))
        for num in permutations(range(n))
        if is_substring_divisible(num)
    )
if __name__ == "__main__":
    # Print the Project Euler 43 answer using the f-string debug (`=`) syntax.
    print(F"""{solution() = }""")
import math
def insertion_sort(array: list, start: int = 0, end: int = 0) -> list:
    """In-place insertion sort of array[start:end]; returns the same list.

    ``end == 0`` means "sort through the end of the list".

    Fix: the original signature repeated one mangled parameter name (a
    SyntaxError) while the body read ``array``, ``start`` and ``end``.
    """
    end = end or len(array)
    for i in range(start, end):
        insert_at = i
        current = array[i]
        # Shift larger elements one slot right until the insertion point is found.
        while insert_at != start and current < array[insert_at - 1]:
            array[insert_at] = array[insert_at - 1]
            insert_at -= 1
        array[insert_at] = current
    return array
def heapify(array: list, index: int, heap_size: int) -> None:  # Max Heap
    """Sift ``array[index]`` down so the subtree rooted there satisfies the
    max-heap property, considering only the first ``heap_size`` elements.

    Fix: the original signature repeated one mangled parameter name (a
    SyntaxError) while the body read ``index`` and ``heap_size``, and the
    function must be named ``heapify`` for its recursive call and for heap_sort.
    """
    largest = index
    left_index = 2 * index + 1  # Left Node
    right_index = 2 * index + 2  # Right Node
    if left_index < heap_size and array[largest] < array[left_index]:
        largest = left_index
    if right_index < heap_size and array[largest] < array[right_index]:
        largest = right_index
    if largest != index:
        # Swap the root with the larger child and continue sifting down.
        array[largest], array[index] = array[index], array[largest]
        heapify(array, largest, heap_size)
def heap_sort(array: list) -> list:
    """In-place heap sort; returns the same list sorted ascending.

    Fix: the heapify call arguments and the swap assignment were mangled in the
    original; the name must be ``heap_sort`` for the call in ``intro_sort``.
    """
    n = len(array)
    # Build a max-heap bottom-up.
    for i in range(n // 2, -1, -1):
        heapify(array, i, n)
    # Repeatedly move the current maximum to the end and re-heapify the prefix.
    for i in range(n - 1, 0, -1):
        array[0], array[i] = array[i], array[0]
        heapify(array, 0, i)
    return array
def median_of_a(array: list, first_index: int, middle_index: int, last_index: int) -> int:
    """Return the median of the three array values at the given indices
    (median-of-three pivot selection for introsort).

    Fix: the original signature repeated one mangled parameter name (a
    SyntaxError); the name must be ``median_of_a`` for the call in ``intro_sort``.
    """
    # A value is the median iff it is greater than exactly one of the other two;
    # the XOR-style ``!=`` on the two comparisons encodes that.
    if (array[first_index] > array[middle_index]) != (
        array[first_index] > array[last_index]
    ):
        return array[first_index]
    elif (array[middle_index] > array[first_index]) != (
        array[middle_index] > array[last_index]
    ):
        return array[middle_index]
    else:
        return array[last_index]
def partition(array: list, low: int, high: int, pivot: int) -> int:
    """Hoare-style partition of ``array[low:high]`` around ``pivot``; returns the
    split index: elements before it are <= pivot, elements from it are >= pivot.

    Fix: the original signature repeated one mangled parameter name (a
    SyntaxError) and the element-swap assignment had lost its targets.
    """
    i = low
    j = high
    while True:
        # Advance i past elements already on the correct (left) side.
        while array[i] < pivot:
            i += 1
        j -= 1
        # Retreat j past elements already on the correct (right) side.
        while pivot < array[j]:
            j -= 1
        if i >= j:
            return i
        array[i], array[j] = array[j], array[i]
        i += 1
def sort(array: list) -> list:
    """Sort the list in place with introsort and return it.

    Fix: the original called the non-existent ``math.loga`` (mangled
    ``math.log2``) and had lost the distinct argument names in the
    ``intro_sort`` call; the name must be ``sort`` for the __main__ block.
    """
    if len(array) == 0:
        return array
    # Recursion depth limit 2*ceil(log2(n)) before falling back to heap sort.
    max_depth = 2 * math.ceil(math.log2(len(array)))
    size_threshold = 16
    return intro_sort(array, 0, len(array), size_threshold, max_depth)
def intro_sort(array: list, start: int, end: int, size_threshold: int, max_depth: int) -> list:
    """Introsort core: quicksort ``array[start:end]`` with median-of-three
    pivots, fall back to heap sort once ``max_depth`` hits 0, and finish small
    partitions with insertion sort.

    Fix: the original signature repeated one mangled parameter name (a
    SyntaxError) and every helper call had lost its distinct arguments.
    """
    while end - start > size_threshold:
        if max_depth == 0:
            return heap_sort(array)
        max_depth -= 1
        pivot = median_of_a(array, start, start + ((end - start) // 2) + 1, end - 1)
        p = partition(array, start, end, pivot)
        # Recurse on the right half, then loop on the left half (tail elimination).
        intro_sort(array, p, end, size_threshold, max_depth)
        end = p
    return insertion_sort(array, start, end)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    # Read a comma-separated list of numbers from stdin and print it sorted.
    # NOTE(review): the two assignments below bind ``lowerCamelCase__`` but the
    # code reads ``user_input`` and ``unsorted`` -- the original names look
    # mangled; confirm before running.
    lowerCamelCase__ = input("""Enter numbers separated by a comma : """).strip()
    lowerCamelCase__ = [float(item) for item in user_input.split(""",""")]
    print(sort(unsorted))
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic PyTorch/CUDA behavior so the pixel-slice assertions below
# are reproducible across runs.
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
    '''Fast (CPU-sized) pipeline tests for ConsistencyModelPipeline: multistep and
    single-step ("onestep") sampling, unconditional and class-conditional.'''
    lowercase = ConsistencyModelPipeline
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    lowercase = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ] )
    @property
    def _lowerCamelCase ( self : int ):
        '''Tiny unconditional test UNet downloaded from the Hub.'''
        lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet' , )
        return unet
    @property
    def _lowerCamelCase ( self : List[str] ):
        '''Tiny class-conditional test UNet downloaded from the Hub.'''
        lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
        return unet
    def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ):
        '''Build the pipeline components dict (unet + CM multistep scheduler).'''
        if class_cond:
            lowerCAmelCase__ : Tuple = self.dummy_cond_unet
        else:
            lowerCAmelCase__ : Dict = self.dummy_uncond_unet
        # Default to CM multistep sampler
        lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : List[Any] = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components
    def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ):
        '''Seeded generator + default multistep call kwargs (timesteps [22, 0]).'''
        if str(a ).startswith('mps' ):
            lowerCAmelCase__ : List[str] = torch.manual_seed(a )
        else:
            lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
        lowerCAmelCase__ : str = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Multistep unconditional sampling matches the reference pixel slice.'''
        lowerCAmelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Tuple = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : str = self.get_dummy_inputs(a )
        lowerCAmelCase__ : str = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def _lowerCamelCase ( self : List[str] ):
        '''Multistep class-conditional sampling matches the reference pixel slice.'''
        lowerCAmelCase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a )
        lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Tuple = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : int = 0
        lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def _lowerCamelCase ( self : Any ):
        '''Single-step unconditional sampling matches the reference pixel slice.'''
        lowerCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
        lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Dict = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a )
        lowerCAmelCase__ : Optional[Any] = 1
        lowerCAmelCase__ : Dict = None
        lowerCAmelCase__ : List[Any] = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Single-step class-conditional sampling matches the reference pixel slice.'''
        lowerCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
        lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a )
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
        lowerCAmelCase__ : Optional[Any] = pipe.to(a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a )
        lowerCAmelCase__ : Dict = 1
        lowerCAmelCase__ : Tuple = None
        lowerCAmelCase__ : Optional[Any] = 0
        lowerCAmelCase__ : str = pipe(**a ).images
        assert image.shape == (1, 32, 32, 3)
        lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    '''GPU integration tests against the full diffusers/consistency_models
    ImageNet-64 checkpoint, in fp32 and fp16 (flash attention) variants.'''
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Free GPU memory between tests.'''
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
        '''Common call kwargs; optionally pins fixed latents for fp16 runs.'''
        lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
        lowerCAmelCase__ : List[Any] = {
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'class_labels': 0,
            'generator': generator,
            'output_type': 'np',
        }
        if get_fixed_latents:
            lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
            lowerCAmelCase__ : Tuple = latents
        return inputs
    def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
        '''Seeded random latents of the requested device/dtype/shape.'''
        if type(a ) == str:
            lowerCAmelCase__ : str = torch.device(a )
        lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
        lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
        return latents
    def _lowerCamelCase ( self : str ):
        '''Multistep sampling on the full checkpoint matches the reference slice.'''
        lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Optional[Any] = self.get_inputs()
        lowerCAmelCase__ : Dict = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    def _lowerCamelCase ( self : str ):
        '''Single-step sampling on the full checkpoint matches the reference slice.'''
        lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : List[str] = self.get_inputs()
        lowerCAmelCase__ : Union[str, Any] = 1
        lowerCAmelCase__ : List[str] = None
        lowerCAmelCase__ : List[str] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
    @require_torch_a
    def _lowerCamelCase ( self : List[str] ):
        '''Multistep fp16 sampling under torch 2.0 flash attention matches slice.'''
        lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            lowerCAmelCase__ : Dict = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
    @require_torch_a
    def _lowerCamelCase ( self : Optional[int] ):
        '''Single-step fp16 sampling under torch 2.0 flash attention matches slice.'''
        lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
        lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
            num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
        lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
        pipe.to(torch_device=a , torch_dtype=torch.floataa )
        pipe.set_progress_bar_config(disable=a )
        lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
        lowerCAmelCase__ : List[str] = 1
        lowerCAmelCase__ : str = None
        # Ensure usage of flash attention in torch 2.0
        with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
            lowerCAmelCase__ : List[str] = pipe(**a ).images
        assert image.shape == (1, 64, 64, 3)
        lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
        lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
lowerCamelCase__ = get_tests_dir("""fixtures""")
lowerCamelCase__ = get_tests_dir("""fixtures/dummy_feature_extractor_config.json""")
lowerCamelCase__ = get_tests_dir("""fixtures/dummy-config.json""")
class A__ ( unittest.TestCase ):
    '''Tests for AutoFeatureExtractor.from_pretrained: Hub/local resolution,
    error messages, remote-code (trust_remote_code) loading, and custom-class
    registration.

    NOTE(review): bare ``a`` arguments are artifacts of an automated rewrite
    (they stood for fixture paths, expected classes, boolean flags, ...);
    confirm against the upstream test before running.
    '''
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''setUp-style hook (placeholder counter).'''
        lowerCAmelCase__ : List[str] = 0
    def _lowerCamelCase ( self : int ):
        '''Resolve a feature extractor straight from a Hub model id.'''
        lowerCAmelCase__ : int = AutoFeatureExtractor.from_pretrained('facebook/wav2vec2-base-960h' )
        self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : List[Any] ):
        '''Resolve from a local directory / explicit config path.'''
        lowerCAmelCase__ : str = AutoFeatureExtractor.from_pretrained(a )
        self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : int ):
        '''A plain model config.json alone must be enough to resolve the extractor.'''
        with tempfile.TemporaryDirectory() as tmpdirname:
            lowerCAmelCase__ : List[Any] = WavaVecaConfig()
            # remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
            lowerCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(a ).to_dict()
            config_dict.pop('feature_extractor_type' )
            lowerCAmelCase__ : Dict = WavaVecaFeatureExtractor(**a )
            # save in new folder
            model_config.save_pretrained(a )
            config.save_pretrained(a )
            lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(a )
            # make sure private variable is not incorrectly saved
            lowerCAmelCase__ : int = json.loads(config.to_json_string() )
            self.assertTrue('_processor_class' not in dict_as_saved )
            self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : List[str] ):
        '''Resolve directly from an explicit preprocessor config file.'''
        lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained(a )
        self.assertIsInstance(a , a )
    def _lowerCamelCase ( self : List[Any] ):
        '''A clear error is raised for a non-existent model identifier.'''
        with self.assertRaisesRegex(
            a , 'bert-base is not a local folder and is not a valid model identifier' ):
            lowerCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained('bert-base' )
    def _lowerCamelCase ( self : Dict ):
        '''A clear error is raised for an invalid git revision.'''
        with self.assertRaisesRegex(
            a , R'aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)' ):
            lowerCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(a , revision='aaaaaa' )
    def _lowerCamelCase ( self : Dict ):
        '''A clear error is raised when the repo has no preprocessor_config.json.'''
        with self.assertRaisesRegex(
            a , 'hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json.' , ):
            lowerCAmelCase__ : Optional[int] = AutoFeatureExtractor.from_pretrained('hf-internal-testing/config-no-model' )
    def _lowerCamelCase ( self : Tuple ):
        '''Remote-code extractors require trust_remote_code=True and must survive a
        save_pretrained / from_pretrained round trip.'''
        with self.assertRaises(a ):
            lowerCAmelCase__ : Any = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
        # If remote code is disabled, we can't load this config.
        with self.assertRaises(a ):
            lowerCAmelCase__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
        lowerCAmelCase__ : Dict = AutoFeatureExtractor.from_pretrained(
            'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
        self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
        # Test feature extractor can be reloaded.
        with tempfile.TemporaryDirectory() as tmp_dir:
            feature_extractor.save_pretrained(a )
            lowerCAmelCase__ : Optional[Any] = AutoFeatureExtractor.from_pretrained(a , trust_remote_code=a )
        self.assertEqual(reloaded_feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
    def _lowerCamelCase ( self : List[str] ):
        '''Register a custom config/extractor pair, round-trip it, then restore the
        auto-class registries.'''
        try:
            AutoConfig.register('custom' , a )
            AutoFeatureExtractor.register(a , a )
            # Trying to register something existing in the Transformers library will raise an error
            with self.assertRaises(a ):
                AutoFeatureExtractor.register(a , a )
            # Now that the config is registered, it can be used as any other config with the auto-API
            lowerCAmelCase__ : Dict = CustomFeatureExtractor.from_pretrained(a )
            with tempfile.TemporaryDirectory() as tmp_dir:
                feature_extractor.save_pretrained(a )
                lowerCAmelCase__ : Tuple = AutoFeatureExtractor.from_pretrained(a )
                self.assertIsInstance(a , a )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
    def _lowerCamelCase ( self : Tuple ):
        '''Locally-registered classes win over remote code unless trust_remote_code
        is explicitly enabled.'''
        class A__ ( __magic_name__ ):
            lowercase = True
        try:
            AutoConfig.register('custom' , a )
            AutoFeatureExtractor.register(a , a )
            # If remote code is not set, the default is to use local
            lowerCAmelCase__ : Dict = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote code is disabled, we load the local one.
            lowerCAmelCase__ : List[str] = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(feature_extractor.is_local )
            # If remote is enabled, we load from the Hub
            lowerCAmelCase__ : int = AutoFeatureExtractor.from_pretrained(
                'hf-internal-testing/test_dynamic_feature_extractor' , trust_remote_code=a )
            self.assertEqual(feature_extractor.__class__.__name__ , 'NewFeatureExtractor' )
            self.assertTrue(not hasattr(a , 'is_local' ) )
        finally:
            if "custom" in CONFIG_MAPPING._extra_content:
                del CONFIG_MAPPING._extra_content["custom"]
            if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
                del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __magic_name__ ):
    '''Token-id dataset for distillation: holds ragged token sequences plus their
    lengths, sanitizes them on construction (split long, drop short and
    unknown-heavy sequences), and pads batches in the collate method.

    NOTE(review): bare ``a`` arguments and names read but never bound
    (``new_tok_ids``, ``max_seq_len_``, ...) are artifacts of an automated
    rewrite; the read names show the intended variables -- confirm upstream.
    '''
    def __init__( self : int , a : List[str] , a : List[str] ):
        '''Store params + data, then sanitize and log statistics.'''
        lowerCAmelCase__ : List[Any] = params
        lowerCAmelCase__ : Union[str, Any] = np.array(a )
        lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] )
        self.check()
        self.remove_long_sequences()
        self.remove_empty_sequences()
        self.remove_unknown_sequences()
        self.check()
        self.print_statistics()
    def __getitem__( self : str , a : List[str] ):
        '''Return the (token_ids, length) pair for one example.'''
        return (self.token_ids[index], self.lengths[index])
    def __len__( self : Optional[int] ):
        '''Number of sequences currently stored.'''
        return len(self.lengths )
    def _lowerCamelCase ( self : Tuple ):
        '''Invariant check: one length per sequence, and each length is accurate.'''
        assert len(self.token_ids ) == len(self.lengths )
        assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
    def _lowerCamelCase ( self : Optional[int] ):
        '''Split sequences longer than max_model_input_size into chunks, re-adding
        the cls/sep (MLM) or bos/eos (CLM) sentinels at chunk boundaries.'''
        lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size
        lowerCAmelCase__ : Optional[int] = self.lengths > max_len
        logger.info(f'''Splitting {sum(a )} too long sequences.''' )
        def divide_chunks(a : List[str] , a : Tuple ):
            # Slice a sequence into consecutive chunks of size n.
            return [l[i : i + n] for i in range(0 , len(a ) , a )]
        lowerCAmelCase__ : Union[str, Any] = []
        lowerCAmelCase__ : Union[str, Any] = []
        if self.params.mlm:
            lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
        else:
            lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
        for seq_, len_ in zip(self.token_ids , self.lengths ):
            assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
            if len_ <= max_len:
                new_tok_ids.append(seq_ )
                new_lengths.append(len_ )
            else:
                lowerCAmelCase__ : Optional[int] = []
                # max_len - 2 leaves room to re-insert the two sentinel tokens.
                for sub_s in divide_chunks(seq_ , max_len - 2 ):
                    if sub_s[0] != cls_id:
                        lowerCAmelCase__ : Dict = np.insert(a , 0 , a )
                    if sub_s[-1] != sep_id:
                        lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a )
                    assert len(a ) <= max_len
                    assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
                    sub_seqs.append(a )
                new_tok_ids.extend(a )
                new_lengths.extend([len(a ) for l in sub_seqs] )
        lowerCAmelCase__ : str = np.array(a )
        lowerCAmelCase__ : Optional[Any] = np.array(a )
    def _lowerCamelCase ( self : Optional[Any] ):
        '''Drop sequences of 11 tokens or fewer.'''
        lowerCAmelCase__ : Union[str, Any] = len(self )
        lowerCAmelCase__ : List[Any] = self.lengths > 11
        lowerCAmelCase__ : Dict = self.token_ids[indices]
        lowerCAmelCase__ : Tuple = self.lengths[indices]
        lowerCAmelCase__ : Any = len(self )
        logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
    def _lowerCamelCase ( self : List[str] ):
        '''Drop sequences where unknown tokens make up 50% or more.'''
        if "unk_token" not in self.params.special_tok_ids:
            return
        else:
            lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token']
            lowerCAmelCase__ : str = len(self )
            lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
            lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5
            lowerCAmelCase__ : List[str] = self.token_ids[indices]
            lowerCAmelCase__ : Optional[Any] = self.lengths[indices]
            lowerCAmelCase__ : Union[str, Any] = len(self )
            logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''Log the dataset size (master process only).'''
        if not self.params.is_master:
            return
        logger.info(f'''{len(self )} sequences''' )
        # data_len = sum(self.lengths)
        # nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
        # logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
        # unk_idx = self.params.special_tok_ids['unk_token']
        # nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
        # logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
    def _lowerCamelCase ( self : int , a : Optional[int] ):
        '''Collate a batch: pad token ids to the batch max length and return
        (token_ids, lengths) tensors.'''
        lowerCAmelCase__ : Optional[Any] = [t[0] for t in batch]
        lowerCAmelCase__ : List[str] = [t[1] for t in batch]
        assert len(a ) == len(a )
        # Max for paddings
        lowerCAmelCase__ : List[str] = max(a )
        # Pad token ids
        if self.params.mlm:
            lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token']
        else:
            lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
        lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
        assert len(tk_ ) == len(a )
        assert all(len(a ) == max_seq_len_ for t in tk_ )
        lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
        lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs)
        return tk_t, lg_t
# Logistic Regression from scratch
# In[62]:
# In[63]:
# importing all the required libraries
import numpy as np
from matplotlib import pyplot as plt
from sklearn import datasets
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
return 1 / (1 + np.exp(-z ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
return (-y * np.log(SCREAMING_SNAKE_CASE_ ) - (1 - y) * np.log(1 - h )).mean()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
lowerCAmelCase__ : Optional[Any] = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return np.sum(y * scores - np.log(1 + np.exp(SCREAMING_SNAKE_CASE_ ) ) )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=70_000 ) -> int:
lowerCAmelCase__ : Union[str, Any] = np.zeros(x.shape[1] )
for iterations in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Union[str, Any] = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Any = sigmoid_function(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = np.dot(x.T , h - y ) / y.size
lowerCAmelCase__ : Optional[Any] = theta - alpha * gradient # updating the weights
lowerCAmelCase__ : Any = np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = sigmoid_function(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = cost_function(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
if iterations % 100 == 0:
print(F'''loss: {j} \t''' ) # printing the loss after every 100 iterations
return theta
# In[68]:
if __name__ == "__main__":
lowerCamelCase__ = datasets.load_iris()
lowerCamelCase__ = iris.data[:, :2]
lowerCamelCase__ = (iris.target != 0) * 1
lowerCamelCase__ = 0.1
lowerCamelCase__ = logistic_reg(alpha, x, y, max_iterations=7_0000)
print("""theta: """, theta) # printing the theta i.e our weights vector
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Dict:
return sigmoid_function(
np.dot(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) # predicting the value of probability from the logistic regression algorithm
plt.figure(figsize=(10, 6))
plt.scatter(x[y == 0][:, 0], x[y == 0][:, 1], color="""b""", label="""0""")
plt.scatter(x[y == 1][:, 0], x[y == 1][:, 1], color="""r""", label="""1""")
((lowerCamelCase__) , (lowerCamelCase__)) = (x[:, 0].min(), x[:, 0].max())
((lowerCamelCase__) , (lowerCamelCase__)) = (x[:, 1].min(), x[:, 1].max())
((lowerCamelCase__) , (lowerCamelCase__)) = np.meshgrid(np.linspace(xa_min, xa_max), np.linspace(xa_min, xa_max))
lowerCamelCase__ = np.c_[xxa.ravel(), xxa.ravel()]
lowerCamelCase__ = predict_prob(grid).reshape(xxa.shape)
plt.contour(xxa, xxa, probs, [0.5], linewidths=1, colors="""black""")
plt.legend()
plt.show() | 69 |
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 69 | 1 |
from math import factorial
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
if successes > trials:
raise ValueError('successes must be lower or equal to trials' )
if trials < 0 or successes < 0:
raise ValueError('the function is defined for non-negative integers' )
if not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) or not isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise ValueError('the function is defined for non-negative integers' )
if not 0 < prob < 1:
raise ValueError('prob has to be in range of 1 - 0' )
lowerCAmelCase__ : Optional[int] = (prob**successes) * ((1 - prob) ** (trials - successes))
# Calculate the binomial coefficient: n! / k!(n-k)!
lowerCAmelCase__ : Any = float(factorial(SCREAMING_SNAKE_CASE_ ) )
coefficient /= factorial(SCREAMING_SNAKE_CASE_ ) * factorial(trials - successes )
return probability * coefficient
if __name__ == "__main__":
from doctest import testmod
testmod()
print("""Probability of 2 successes out of 4 trails""")
print("""with probability of 0.75 is:""", end=""" """)
print(binomial_distribution(2, 4, 0.75)) | 69 |
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_rembert import RemBertTokenizer
else:
lowerCamelCase__ = None
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """sentencepiece.model""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/sentencepiece.model""",
},
"""tokenizer_file""": {
"""google/rembert""": """https://huggingface.co/google/rembert/resolve/main/tokenizer.json""",
},
}
lowerCamelCase__ = {
"""google/rembert""": 256,
}
lowerCamelCase__ = """▁"""
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = RemBertTokenizer
def __init__( self : Optional[Any] , a : str=None , a : Any=None , a : List[Any]=True , a : str=True , a : Dict=False , a : Dict="[CLS]" , a : int="[SEP]" , a : Tuple="<unk>" , a : Optional[Any]="[SEP]" , a : Tuple="<pad>" , a : Dict="[CLS]" , a : Optional[Any]="[MASK]" , **a : str , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = AddedToken(a , lstrip=a , rstrip=a ) if isinstance(a , a ) else mask_token
super().__init__(
a , tokenizer_file=a , do_lower_case=a , remove_space=a , keep_accents=a , bos_token=a , eos_token=a , unk_token=a , sep_token=a , pad_token=a , cls_token=a , mask_token=a , **a , )
lowerCAmelCase__ : int = do_lower_case
lowerCAmelCase__ : int = remove_space
lowerCAmelCase__ : List[Any] = keep_accents
lowerCAmelCase__ : Optional[Any] = vocab_file
lowerCAmelCase__ : Union[str, Any] = False if not self.vocab_file else True
def _lowerCamelCase ( self : Dict , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.sep_token_id]
lowerCAmelCase__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : str , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
'You should not supply a second sequence if the provided sequence of '
'ids is already formatted with special tokens for the model.' )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is not None:
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = [self.sep_token_id]
lowerCAmelCase__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Tuple , a : str , a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(a ):
logger.error('Vocabulary path ({}) should be a directory'.format(a ) )
return
lowerCAmelCase__ : int = os.path.join(
a , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(a ):
copyfile(self.vocab_file , a )
return (out_vocab_file,) | 69 | 1 |
import doctest
import logging
import os
import unittest
from pathlib import Path
from typing import List, Union
import transformers
from transformers.testing_utils import require_tf, require_torch, slow
lowerCamelCase__ = logging.getLogger()
@unittest.skip('Temporarily disable the doc tests.' )
@require_torch
@require_tf
@slow
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict , a : Path , a : Union[str, None] = None , a : Union[List[str], None] = None , a : Union[str, List[str], None] = None , a : bool = True , ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = [file for file in os.listdir(a ) if os.path.isfile(os.path.join(a , a ) )]
if identifier is not None:
lowerCAmelCase__ : Optional[Any] = [file for file in files if identifier in file]
if n_identifier is not None:
if isinstance(a , a ):
for n_ in n_identifier:
lowerCAmelCase__ : Dict = [file for file in files if n_ not in file]
else:
lowerCAmelCase__ : Dict = [file for file in files if n_identifier not in file]
lowerCAmelCase__ : Union[str, Any] = ignore_files or []
ignore_files.append('__init__.py' )
lowerCAmelCase__ : Any = [file for file in files if file not in ignore_files]
for file in files:
# Open all files
print('Testing' , a )
if only_modules:
lowerCAmelCase__ : int = file.split('.' )[0]
try:
lowerCAmelCase__ : Optional[int] = getattr(a , a )
lowerCAmelCase__ : int = doctest.DocTestSuite(a )
lowerCAmelCase__ : str = unittest.TextTestRunner().run(a )
self.assertIs(len(result.failures ) , 0 )
except AttributeError:
logger.info(f'''{module_identifier} is not a module.''' )
else:
lowerCAmelCase__ : Optional[Any] = doctest.testfile(str('..' / directory / file ) , optionflags=doctest.ELLIPSIS )
self.assertIs(result.failed , 0 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = Path('src/transformers' )
lowerCAmelCase__ : Optional[int] = 'modeling'
lowerCAmelCase__ : Any = [
'modeling_ctrl.py',
'modeling_tf_ctrl.py',
]
self.analyze_directory(a , identifier=a , ignore_files=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = Path('src/transformers' )
lowerCAmelCase__ : Optional[int] = 'tokenization'
self.analyze_directory(a , identifier=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = Path('src/transformers' )
lowerCAmelCase__ : str = 'configuration'
self.analyze_directory(a , identifier=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = Path('src/transformers' )
lowerCAmelCase__ : str = ['configuration', 'modeling', 'tokenization']
self.analyze_directory(a , n_identifier=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = Path('docs/source' )
lowerCAmelCase__ : Optional[Any] = ['favicon.ico']
self.analyze_directory(a , ignore_files=a , only_modules=a ) | 69 |
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , a : Callable , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[dict] = None , a : Optional[int] = None , **a : str , ):
'''simple docstring'''
super().__init__(
features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowerCAmelCase__ : int = Generator(
cache_dir=a , features=a , generator=a , gen_kwargs=a , **a , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ : List[Any] = self.builder.as_streaming_dataset(split='train' )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : int = None
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : Dict = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowerCAmelCase__ : Union[str, Any] = self.builder.as_dataset(
split='train' , verification_mode=a , in_memory=self.keep_in_memory )
return dataset | 69 | 1 |
from typing import Optional
from .. import Features, NamedSplit
from ..packaged_modules.text.text import Text
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class A__ ( __magic_name__ ):
def __init__( self : int , a : NestedDataStructureLike[PathLike] , a : Optional[NamedSplit] = None , a : Optional[Features] = None , a : str = None , a : bool = False , a : bool = False , a : Optional[int] = None , **a : str , ):
'''simple docstring'''
super().__init__(
a , split=a , features=a , cache_dir=a , keep_in_memory=a , streaming=a , num_proc=a , **a , )
lowerCAmelCase__ : Dict = path_or_paths if isinstance(a , a ) else {self.split: path_or_paths}
lowerCAmelCase__ : Optional[Any] = Text(
cache_dir=a , data_files=a , features=a , **a , )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
if self.streaming:
lowerCAmelCase__ : int = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : Optional[Any] = None
lowerCAmelCase__ : str = None
lowerCAmelCase__ : int = None
self.builder.download_and_prepare(
download_config=a , download_mode=a , verification_mode=a , base_path=a , num_proc=self.num_proc , )
lowerCAmelCase__ : Dict = self.builder.as_dataset(
split=self.split , verification_mode=a , in_memory=self.keep_in_memory )
return dataset | 69 |
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
lowercase = ['audio_values', 'audio_mask']
def __init__( self : Dict , a : Dict=2_048 , a : Optional[Any]=1 , a : List[Any]=[16, 16] , a : Dict=128 , a : List[str]=44_100 , a : Union[str, Any]=86 , a : Optional[Any]=2_048 , a : List[Any]=0.0 , **a : Tuple , ):
'''simple docstring'''
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , **a , )
lowerCAmelCase__ : Optional[Any] = spectrogram_length
lowerCAmelCase__ : str = num_channels
lowerCAmelCase__ : Tuple = patch_size
lowerCAmelCase__ : Optional[int] = feature_size // self.patch_size[1]
lowerCAmelCase__ : Union[str, Any] = n_fft
lowerCAmelCase__ : Union[str, Any] = sampling_rate // hop_length_to_sampling_rate
lowerCAmelCase__ : int = sampling_rate
lowerCAmelCase__ : Union[str, Any] = padding_value
lowerCAmelCase__ : Dict = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=a , min_frequency=0.0 , max_frequency=2_2_0_5_0.0 , sampling_rate=a , norm='slaney' , mel_scale='slaney' , ).T
def _lowerCamelCase ( self : Optional[int] , a : np.array ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = spectrogram(
a , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=8_0.0 , )
lowerCAmelCase__ : Any = log_spec[:, :-1]
lowerCAmelCase__ : Dict = log_spec - 2_0.0
lowerCAmelCase__ : Tuple = np.clip(log_spec / 4_0.0 , -2.0 , 0.0 ) + 1.0
return log_spec
def __call__( self : str , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : Optional[Union[str, TensorType]] = None , a : Optional[bool] = True , a : Optional[int] = None , a : bool = False , a : bool = False , **a : int , ):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
f''' with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase__ : Dict = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase__ : List[Any] = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ : int = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowerCAmelCase__ : Optional[Any] = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : str = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : Tuple = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
lowerCAmelCase__ : int = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , a ):
lowerCAmelCase__ : Optional[Any] = [np.asarray(a , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
lowerCAmelCase__ : Optional[Any] = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
lowerCAmelCase__ : Any = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
lowerCAmelCase__ : List[Any] = np.array(a ).astype(np.floataa )
# convert into correct format for padding
lowerCAmelCase__ : int = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
lowerCAmelCase__ : Dict = np.ones([len(a ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
lowerCAmelCase__ : Optional[Any] = padded_audio_features * self.padding_value
for i in range(len(a ) ):
lowerCAmelCase__ : Tuple = audio_features[i]
lowerCAmelCase__ : List[str] = feature
# return as BatchFeature
if return_attention_mask:
lowerCAmelCase__ : Tuple = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
lowerCAmelCase__ : Any = {'audio_values': padded_audio_features}
lowerCAmelCase__ : Any = BatchFeature(data=a , tensor_type=a )
return encoded_inputs | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
assert column_title.isupper()
lowerCAmelCase__ : Optional[int] = 0
lowerCAmelCase__ : int = len(SCREAMING_SNAKE_CASE_ ) - 1
lowerCAmelCase__ : Union[str, Any] = 0
while index >= 0:
lowerCAmelCase__ : int = (ord(column_title[index] ) - 64) * pow(26 , SCREAMING_SNAKE_CASE_ )
answer += value
power += 1
index -= 1
return answer
if __name__ == "__main__":
from doctest import testmod
testmod() | 69 |
import unittest
from transformers import DonutProcessor
lowerCamelCase__ = """naver-clova-ix/donut-base"""
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = DonutProcessor.from_pretrained(a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = {
'name': 'John Doe',
'age': '99',
'city': 'Atlanta',
'state': 'GA',
'zip': '30301',
'phone': '123-4567',
'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
}
lowerCAmelCase__ : Union[str, Any] = (
'<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
'<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
'<s_nicknames><s_nickname>Johnny</s_nickname>'
'<sep/><s_nickname>JD</s_nickname></s_nicknames>'
)
lowerCAmelCase__ : Optional[Any] = self.processor.tokenajson(a )
self.assertDictEqual(a , a ) | 69 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 |
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = 1.0 ) -> int:
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 | 1 |
from ...processing_utils import ProcessorMixin
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'feature_extractor']
lowercase = 'TvltImageProcessor'
lowercase = 'TvltFeatureExtractor'
def __init__( self : str , a : Union[str, Any] , a : Optional[Any] ):
'''simple docstring'''
super().__init__(image_processor=a , feature_extractor=a )
lowerCAmelCase__ : Optional[Any] = image_processor
lowerCAmelCase__ : Dict = feature_extractor
def __call__( self : Optional[int] , a : Any=None , a : Any=None , a : Optional[int]=None , a : Any=None , a : Optional[Any]=False , a : Optional[int]=False , *a : List[Any] , **a : str , ):
'''simple docstring'''
if images is None and audio is None:
raise ValueError('You need to specify either an `images` or `audio` input to process.' )
lowerCAmelCase__ : Union[str, Any] = None
if images is not None:
lowerCAmelCase__ : Union[str, Any] = self.image_processor(a , mask_pixel=a , *a , **a )
if images_mixed is not None:
lowerCAmelCase__ : Optional[int] = self.image_processor(a , is_mixed=a , *a , **a )
if audio is not None:
lowerCAmelCase__ : Tuple = self.feature_extractor(
a , *a , sampling_rate=a , mask_audio=a , **a )
lowerCAmelCase__ : Tuple = {}
if audio is not None:
output_dict.update(a )
if images is not None:
output_dict.update(a )
if images_mixed_dict is not None:
output_dict.update(a )
return output_dict
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.image_processor.model_input_names
lowerCAmelCase__ : Tuple = self.feature_extractor.model_input_names
return list(dict.fromkeys(image_processor_input_names + feature_extractor_input_names ) ) | 69 |
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = XLMTokenizer
lowercase = False
def _lowerCamelCase ( self : int ):
'''simple docstring'''
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
lowerCAmelCase__ : List[str] = [
'l',
'o',
'w',
'e',
'r',
's',
't',
'i',
'd',
'n',
'w</w>',
'r</w>',
't</w>',
'lo',
'low',
'er</w>',
'low</w>',
'lowest</w>',
'newer</w>',
'wider</w>',
'<unk>',
]
lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) )
lowerCAmelCase__ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file , 'w' ) as fp:
fp.write(json.dumps(a ) )
with open(self.merges_file , 'w' ) as fp:
fp.write('\n'.join(a ) )
def _lowerCamelCase ( self : List[str] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = 'lower newer'
lowerCAmelCase__ : Any = 'lower newer'
return input_text, output_text
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
lowerCAmelCase__ : Optional[int] = 'lower'
lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
self.assertListEqual(a , a )
lowerCAmelCase__ : Tuple = tokens + ['<unk>']
lowerCAmelCase__ : Optional[int] = [14, 15, 20]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
@slow
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
lowerCAmelCase__ : Any = tokenizer.encode('sequence builders' , add_special_tokens=a )
lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(a )
lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a , a )
assert encoded_sentence == [0] + text + [1]
assert encoded_pair == [0] + text + [1] + text_a + [1] | 69 | 1 |
import math
class A__ :
def _lowerCamelCase ( self : Union[str, Any] , a : list[list[float]] , a : list[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = 0.0
lowerCAmelCase__ : List[str] = 0.0
for i in range(len(a ) ):
da += math.pow((sample[i] - weights[0][i]) , 2 )
da += math.pow((sample[i] - weights[1][i]) , 2 )
return 0 if da > da else 1
return 0
def _lowerCamelCase ( self : str , a : list[list[int | float]] , a : list[int] , a : int , a : float ):
'''simple docstring'''
for i in range(len(a ) ):
weights[j][i] += alpha * (sample[i] - weights[j][i])
return weights
def lowerCAmelCase__ ( ) -> None:
# Training Examples ( m, n )
lowerCAmelCase__ : List[Any] = [[1, 1, 0, 0], [0, 0, 0, 1], [1, 0, 0, 0], [0, 0, 1, 1]]
# weight initialization ( n, C )
lowerCAmelCase__ : int = [[0.2, 0.6, 0.5, 0.9], [0.8, 0.4, 0.7, 0.3]]
# training
lowerCAmelCase__ : List[str] = SelfOrganizingMap()
lowerCAmelCase__ : Dict = 3
lowerCAmelCase__ : Dict = 0.5
for _ in range(SCREAMING_SNAKE_CASE_ ):
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
# training sample
lowerCAmelCase__ : Tuple = training_samples[j]
# Compute the winning vector
lowerCAmelCase__ : List[str] = self_organizing_map.get_winner(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Update the winning vector
lowerCAmelCase__ : Dict = self_organizing_map.update(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# classify test sample
lowerCAmelCase__ : Optional[int] = [0, 0, 0, 1]
lowerCAmelCase__ : Tuple = self_organizing_map.get_winner(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# results
print(F'''Clusters that the test sample belongs to : {winner}''' )
print(F'''Weights that have been trained : {weights}''' )
# running the main() function
if __name__ == "__main__":
main() | 69 |
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
    """Resize a PIL image so both sides are multiples of 32 and scale it to [-1, 1].

    Args:
        SCREAMING_SNAKE_CASE_: a ``PIL.Image.Image`` instance.

    Returns:
        A float32 tensor of shape ``(1, C, H, W)`` with values in ``[-1, 1]``.
    """
    image = SCREAMING_SNAKE_CASE_
    w, h = image.size
    # Round each dimension down to the nearest multiple of 32 — the UNet's
    # downsampling path requires dimensions divisible by 32.
    w, h = (x - x % 32 for x in (w, h))
    image = image.resize((w, h), resample=PIL_INTERPOLATION['lanczos'])
    arr = np.array(image).astype(np.float32) / 255.0
    # HWC -> NCHW with a leading batch dimension.
    arr = arr[None].transpose(0, 3, 1, 2)
    tensor = torch.from_numpy(arr)
    # Map [0, 1] -> [-1, 1].
    return 2.0 * tensor - 1.0
class A__ ( __magic_name__ ):
def __init__( self : List[str] , vqvae : VQModel , unet : UNetaDModel , scheduler : Union[
    DDIMScheduler,
    PNDMScheduler,
    LMSDiscreteScheduler,
    EulerDiscreteScheduler,
    EulerAncestralDiscreteScheduler,
    DPMSolverMultistepScheduler,
] , ):
    """Store the pipeline components.

    Args:
        vqvae: VQ model used to decode denoised latents into images.
        unet: UNet that predicts noise during the diffusion loop.
        scheduler: any of the supported discrete noise schedulers.
    """
    super().__init__()
    # register_modules exposes the components as attributes (self.vqvae,
    # self.unet, self.scheduler) and includes them in pipeline save/load.
    self.register_modules(vqvae=vqvae , unet=unet , scheduler=scheduler )
@torch.no_grad()
def __call__( self : int , a : Union[torch.Tensor, PIL.Image.Image] = None , a : Optional[int] = 1 , a : Optional[int] = 100 , a : Optional[float] = 0.0 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
'''simple docstring'''
if isinstance(a , PIL.Image.Image ):
lowerCAmelCase__ : str = 1
elif isinstance(a , torch.Tensor ):
lowerCAmelCase__ : Union[str, Any] = image.shape[0]
else:
raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a )}''' )
if isinstance(a , PIL.Image.Image ):
lowerCAmelCase__ : List[Any] = preprocess(a )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
lowerCAmelCase__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
lowerCAmelCase__ : Optional[Any] = next(self.unet.parameters() ).dtype
lowerCAmelCase__ : List[str] = randn_tensor(a , generator=a , device=self.device , dtype=a )
lowerCAmelCase__ : Any = image.to(device=self.device , dtype=a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(a , device=self.device )
lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
lowerCAmelCase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
lowerCAmelCase__ : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
lowerCAmelCase__ : List[str] = {}
if accepts_eta:
lowerCAmelCase__ : List[Any] = eta
for t in self.progress_bar(a ):
# concat latents and low resolution image in the channel dimension.
lowerCAmelCase__ : Union[str, Any] = torch.cat([latents, image] , dim=1 )
lowerCAmelCase__ : Dict = self.scheduler.scale_model_input(a , a )
# predict the noise residual
lowerCAmelCase__ : Tuple = self.unet(a , a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample
# decode the image latents with the VQVAE
lowerCAmelCase__ : Dict = self.vqvae.decode(a ).sample
lowerCAmelCase__ : Tuple = torch.clamp(a , -1.0 , 1.0 )
lowerCAmelCase__ : Tuple = image / 2 + 0.5
lowerCAmelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
lowerCAmelCase__ : int = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a ) | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 600_851_475_143 ) -> int:
try:
lowerCAmelCase__ : Optional[int] = int(SCREAMING_SNAKE_CASE_ )
except (TypeError, ValueError):
raise TypeError('Parameter n must be int or castable to int.' )
if n <= 0:
raise ValueError('Parameter n must be greater than or equal to one.' )
lowerCAmelCase__ : int = 1
lowerCAmelCase__ : List[str] = 2
while i * i <= n:
while n % i == 0:
lowerCAmelCase__ : Union[str, Any] = i
n //= i
i += 1
if n > 1:
lowerCAmelCase__ : int = n
return int(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 |
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
def __init__( self : Union[str, Any] , a : str="" , a : str="train" ):
'''simple docstring'''
assert os.path.isdir(a )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Dict = os.listdir(a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a )
if not os.path.isfile(a ):
continue
self.documents.append(a )
def __len__( self : Any ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Dict , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.documents[idx]
lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1]
with open(a , encoding='utf-8' ) as source:
lowerCAmelCase__ : List[Any] = source.read()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a )
return document_name, story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : Optional[int] = list(filter(lambda SCREAMING_SNAKE_CASE_ : len(SCREAMING_SNAKE_CASE_ ) != 0 , [line.strip() for line in raw_story.split('\n' )] ) )
# for some unknown reason some lines miss a period, add it
lowerCAmelCase__ : List[Any] = [_add_missing_period(SCREAMING_SNAKE_CASE_ ) for line in nonempty_lines]
# gather article lines
lowerCAmelCase__ : int = []
lowerCAmelCase__ : Any = deque(SCREAMING_SNAKE_CASE_ )
while True:
try:
lowerCAmelCase__ : int = lines.popleft()
if element.startswith('@highlight' ):
break
story_lines.append(SCREAMING_SNAKE_CASE_ )
except IndexError:
# if "@highlight" is absent from the file we pop
# all elements until there is None, raising an exception.
return story_lines, []
# gather summary lines
lowerCAmelCase__ : Tuple = list(filter(lambda SCREAMING_SNAKE_CASE_ : not t.startswith('@highlight' ) , SCREAMING_SNAKE_CASE_ ) )
return story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> Any:
lowerCAmelCase__ : int = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
if line.startswith('@highlight' ):
return line
if line[-1] in END_TOKENS:
return line
return line + "."
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if len(SCREAMING_SNAKE_CASE_ ) > block_size:
return sequence[:block_size]
else:
sequence.extend([pad_token_id] * (block_size - len(SCREAMING_SNAKE_CASE_ )) )
return sequence
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
lowerCAmelCase__ : str = torch.ones_like(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : int = sequence == pad_token_id
lowerCAmelCase__ : Optional[int] = 0
return mask
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Tuple:
lowerCAmelCase__ : Any = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in story_lines]
lowerCAmelCase__ : str = [token for sentence in story_lines_token_ids for token in sentence]
lowerCAmelCase__ : Dict = [tokenizer.encode(SCREAMING_SNAKE_CASE_ ) for line in summary_lines]
lowerCAmelCase__ : str = [token for sentence in summary_lines_token_ids for token in sentence]
return story_token_ids, summary_token_ids
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
lowerCAmelCase__ : Optional[int] = []
for sequence in batch:
lowerCAmelCase__ : Union[str, Any] = -1
lowerCAmelCase__ : int = []
for s in sequence:
if s == separator_token_id:
sentence_num += 1
embeddings.append(sentence_num % 2 )
batch_embeddings.append(SCREAMING_SNAKE_CASE_ )
return torch.tensor(SCREAMING_SNAKE_CASE_ ) | 69 | 1 |
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
lowerCamelCase__ = 8
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ) -> Dict:
lowerCAmelCase__ : Tuple = x.device
lowerCAmelCase__ : int = (x * 255).int().clamp(0 , 255 )
lowerCAmelCase__ : int = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : List[Any] = rearrange(SCREAMING_SNAKE_CASE_ , 'd -> d 1 1' )
lowerCAmelCase__ : int = rearrange(SCREAMING_SNAKE_CASE_ , 'b c h w -> b c 1 h w' )
lowerCAmelCase__ : List[Any] = ((x & mask) != 0).float()
lowerCAmelCase__ : Any = rearrange(SCREAMING_SNAKE_CASE_ , 'b c d h w -> b (c d) h w' )
lowerCAmelCase__ : str = bits * 2 - 1
return bits
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=BITS ) -> Union[str, Any]:
lowerCAmelCase__ : int = x.device
lowerCAmelCase__ : Any = (x > 0).int()
lowerCAmelCase__ : str = 2 ** torch.arange(bits - 1 , -1 , -1 , device=SCREAMING_SNAKE_CASE_ , dtype=torch.intaa )
lowerCAmelCase__ : int = rearrange(SCREAMING_SNAKE_CASE_ , 'd -> d 1 1' )
lowerCAmelCase__ : List[str] = rearrange(SCREAMING_SNAKE_CASE_ , 'b (c d) h w -> b c d h w' , d=8 )
lowerCAmelCase__ : Dict = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
def lowerCAmelCase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ = 0.0 , SCREAMING_SNAKE_CASE_ = True , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[DDIMSchedulerOutput, Tuple]:
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
lowerCAmelCase__ : Optional[int] = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
lowerCAmelCase__ : Union[str, Any] = self.alphas_cumprod[timestep]
lowerCAmelCase__ : Union[str, Any] = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
lowerCAmelCase__ : int = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : Tuple = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
lowerCAmelCase__ : str = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ : int = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
lowerCAmelCase__ : List[Any] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
lowerCAmelCase__ : int = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : Union[str, Any] = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
lowerCAmelCase__ : str = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
lowerCAmelCase__ : Union[str, Any] = model_output.device if torch.is_tensor(SCREAMING_SNAKE_CASE_ ) else 'cpu'
lowerCAmelCase__ : str = torch.randn(model_output.shape , dtype=model_output.dtype , generator=SCREAMING_SNAKE_CASE_ ).to(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Union[str, Any] = self._get_variance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ** 0.5 * eta * noise
lowerCAmelCase__ : int = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( self , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_="epsilon" , SCREAMING_SNAKE_CASE_=None , SCREAMING_SNAKE_CASE_ = True , ) -> Union[DDPMSchedulerOutput, Tuple]:
lowerCAmelCase__ : Optional[int] = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = torch.split(SCREAMING_SNAKE_CASE_ , sample.shape[1] , dim=1 )
else:
lowerCAmelCase__ : str = None
# 1. compute alphas, betas
lowerCAmelCase__ : str = self.alphas_cumprod[t]
lowerCAmelCase__ : Optional[Any] = self.alphas_cumprod[t - 1] if t > 0 else self.one
lowerCAmelCase__ : Dict = 1 - alpha_prod_t
lowerCAmelCase__ : Optional[Any] = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
lowerCAmelCase__ : int = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
lowerCAmelCase__ : str = model_output
else:
raise ValueError(F'''Unsupported prediction_type {prediction_type}.''' )
# 3. Clip "predicted x_0"
lowerCAmelCase__ : Any = self.bit_scale
if self.config.clip_sample:
lowerCAmelCase__ : str = torch.clamp(SCREAMING_SNAKE_CASE_ , -scale , SCREAMING_SNAKE_CASE_ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ : List[str] = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
lowerCAmelCase__ : int = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
lowerCAmelCase__ : Tuple = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
lowerCAmelCase__ : Dict = 0
if t > 0:
lowerCAmelCase__ : Dict = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=SCREAMING_SNAKE_CASE_ ).to(model_output.device )
lowerCAmelCase__ : List[str] = (self._get_variance(SCREAMING_SNAKE_CASE_ , predicted_variance=SCREAMING_SNAKE_CASE_ ) ** 0.5) * noise
lowerCAmelCase__ : Tuple = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=SCREAMING_SNAKE_CASE_ , pred_original_sample=SCREAMING_SNAKE_CASE_ )
class A__ ( __magic_name__ ):
def __init__( self : str , a : UNetaDConditionModel , a : Union[DDIMScheduler, DDPMScheduler] , a : Optional[float] = 1.0 , ):
'''simple docstring'''
super().__init__()
lowerCAmelCase__ : Union[str, Any] = bit_scale
lowerCAmelCase__ : Tuple = (
ddim_bit_scheduler_step if isinstance(a , a ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=a , scheduler=a )
@torch.no_grad()
def __call__( self : List[Any] , a : Optional[int] = 256 , a : Optional[int] = 256 , a : Optional[int] = 50 , a : Optional[torch.Generator] = None , a : Optional[int] = 1 , a : Optional[str] = "pil" , a : bool = True , **a : List[Any] , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) , generator=a , )
lowerCAmelCase__ : Optional[int] = decimal_to_bits(a ) * self.bit_scale
lowerCAmelCase__ : Optional[Any] = latents.to(self.device )
self.scheduler.set_timesteps(a )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
lowerCAmelCase__ : int = self.unet(a , a ).sample
# compute the previous noisy sample x_t -> x_t-1
lowerCAmelCase__ : Dict = self.scheduler.step(a , a , a ).prev_sample
lowerCAmelCase__ : Tuple = bits_to_decimal(a )
if output_type == "pil":
lowerCAmelCase__ : List[Any] = self.numpy_to_pil(a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=a ) | 69 |
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCamelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> List[str]:
lowerCAmelCase__ : int = None
# source code of `config_class`
lowerCAmelCase__ : Optional[int] = inspect.getsource(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Dict = _re_checkpoint.findall(SCREAMING_SNAKE_CASE_ )
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
if ckpt_link.endswith('/' ):
lowerCAmelCase__ : Union[str, Any] = ckpt_link[:-1]
# verify the checkpoint name corresponds to the checkpoint link
lowerCAmelCase__ : Dict = F'''https://huggingface.co/{ckpt_name}'''
if ckpt_link == ckpt_link_from_name:
lowerCAmelCase__ : str = ckpt_name
break
return checkpoint
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Union[str, Any] = []
for config_class in list(CONFIG_MAPPING.values() ):
# Skip deprecated models
if "models.deprecated" in config_class.__module__:
continue
lowerCAmelCase__ : Union[str, Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = config_class.__name__
if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
if len(SCREAMING_SNAKE_CASE_ ) > 0:
lowerCAmelCase__ : List[str] = '\n'.join(sorted(SCREAMING_SNAKE_CASE_ ) )
raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
check_config_docstrings_have_checkpoints() | 69 | 1 |
from __future__ import annotations
import math
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> list[int]:
if num <= 0:
lowerCAmelCase__ : Dict = F'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[int] = [True] * (num + 1)
lowerCAmelCase__ : str = []
lowerCAmelCase__ : Optional[Any] = 2
lowerCAmelCase__ : List[str] = int(math.sqrt(SCREAMING_SNAKE_CASE_ ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(SCREAMING_SNAKE_CASE_ )
# Set multiples of start be False
for i in range(start * start , num + 1 , SCREAMING_SNAKE_CASE_ ):
if sieve[i] is True:
lowerCAmelCase__ : Dict = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(SCREAMING_SNAKE_CASE_ )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("""Enter a positive integer: """).strip()))) | 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 | 1 |
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class A__ ( unittest.TestCase ):
def __init__( self : Optional[Any] , a : List[Any] , a : Dict=13 , a : Optional[int]=30 , a : int=2 , a : Any=3 , a : Optional[Any]=True , a : Union[str, Any]=True , a : Optional[Any]=32 , a : Any=5 , a : Dict=4 , a : Dict=37 , a : List[Any]="gelu" , a : Any=0.1 , a : List[Any]=0.1 , a : List[Any]=10 , a : List[str]=0.0_2 , ):
'''simple docstring'''
lowerCAmelCase__ : Dict = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Optional[int] = image_size
lowerCAmelCase__ : Any = patch_size
lowerCAmelCase__ : List[Any] = num_channels
lowerCAmelCase__ : Optional[int] = is_training
lowerCAmelCase__ : int = use_labels
lowerCAmelCase__ : List[Any] = hidden_size
lowerCAmelCase__ : List[Any] = num_hidden_layers
lowerCAmelCase__ : Union[str, Any] = num_attention_heads
lowerCAmelCase__ : Union[str, Any] = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Any = (image_size // patch_size) ** 2
lowerCAmelCase__ : Any = num_patches + 1
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Union[str, Any] = ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=a , initializer_range=self.initializer_range , )
return config, pixel_values
def _lowerCamelCase ( self : Union[str, Any] , a : Tuple , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = FlaxViTModel(config=a )
lowerCAmelCase__ : Any = model(a )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
lowerCAmelCase__ : Dict = (self.image_size, self.image_size)
lowerCAmelCase__ : List[str] = (self.patch_size, self.patch_size)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, num_patches + 1, self.hidden_size) )
def _lowerCamelCase ( self : Optional[int] , a : Optional[int] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.type_sequence_label_size
lowerCAmelCase__ : Dict = FlaxViTForImageClassification(config=a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
lowerCAmelCase__ : List[Any] = 1
lowerCAmelCase__ : Optional[Any] = FlaxViTForImageClassification(a )
lowerCAmelCase__ : List[str] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = model(a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Union[str, Any] = config_and_inputs
lowerCAmelCase__ : int = {'pixel_values': pixel_values}
return config, inputs_dict
@require_flax
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Dict = FlaxViTModelTester(self )
lowerCAmelCase__ : List[Any] = ConfigTester(self , config_class=a , has_text_modality=a , hidden_size=37 )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(a )
lowerCAmelCase__ : Union[str, Any] = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Union[str, Any] = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
lowerCAmelCase__ : List[str] = self._prepare_for_class(a , a )
lowerCAmelCase__ : Any = model_class(a )
@jax.jit
def model_jitted(a : Union[str, Any] , **a : List[str] ):
return model(pixel_values=a , **a )
with self.subTest('JIT Enabled' ):
lowerCAmelCase__ : List[str] = model_jitted(**a ).to_tuple()
with self.subTest('JIT Disabled' ):
with jax.disable_jit():
lowerCAmelCase__ : Union[str, Any] = model_jitted(**a ).to_tuple()
self.assertEqual(len(a ) , len(a ) )
for jitted_output, output in zip(a , a ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _lowerCamelCase ( self : str ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
lowerCAmelCase__ : Dict = model_class_name.from_pretrained('google/vit-base-patch16-224' )
lowerCAmelCase__ : Tuple = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(a ) | 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 | 1 |
from math import pi
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> float:
return 2 * pi * radius * (angle / 360)
if __name__ == "__main__":
print(arc_length(90, 10)) | 69 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
# Module-level logger for this script.
lowerCamelCase__ = logging.getLogger(__name__)
# Inference-only script: disable autograd globally to save memory and compute.
torch.set_grad_enabled(False)
# Device string used for the DPR context encoder. NOTE(review): this rebinds
# the same obfuscated module-level name that held the logger above.
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__(text: str, n: int = 100, character: str = " ") -> List[str]:
    """Split ``text`` into passages of at most ``n`` words.

    Bug fix: the original signature declared three parameters all named
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError: duplicate argument) while the
    body referenced ``text``/``n``/``character``; the parameters are renamed
    to the names the body actually uses.

    Args:
        text: The document text to split.
        n: Maximum number of words per passage (default 100).
        character: Separator used both to split into words and to re-join
            each passage (default: a single space).

    Returns:
        A list of whitespace-trimmed passages.
    """
    words = text.split(character)
    # Walk the word list in strides of ``n`` and re-join each window.
    return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]
def lowerCAmelCase__(documents: dict) -> dict:
    """Split every document in a batch into passages of at most 100 words.

    Bug fixes: the original used an illegal annotated tuple assignment
    (``a, b: int = [], []`` is a SyntaxError), referenced the undefined name
    ``documents``, and called ``split_text`` which is not defined anywhere in
    this file; the splitting logic is inlined here so the block is
    self-contained.

    Args:
        documents: Batch dict with parallel ``"title"`` and ``"text"`` lists.

    Returns:
        A batch dict with one ``"title"``/``"text"`` entry per passage.
        Documents whose text is ``None`` are dropped; ``None`` titles become
        empty strings.
    """

    def _split_text(text: str, n: int = 100, character: str = " ") -> List[str]:
        # Chunk ``text`` into windows of at most ``n`` words.
        words = text.split(character)
        return [character.join(words[i : i + n]).strip() for i in range(0, len(words), n)]

    titles, texts = [], []
    for title, text in zip(documents["title"], documents["text"]):
        if text is not None:
            for passage in _split_text(text):
                titles.append(title if title is not None else "")
                texts.append(passage)
    return {"title": titles, "text": texts}
def lowerCAmelCase__(documents: dict, ctx_encoder, ctx_tokenizer) -> dict:
    """Embed a batch of (title, text) passages with a DPR context encoder.

    Bug fixes: the original signature declared three parameters all named
    ``SCREAMING_SNAKE_CASE_`` (a SyntaxError: duplicate argument) and the
    body referenced the undefined names ``ctx_tokenizer``/``ctx_encoder``.
    The parameter names ``ctx_encoder``/``ctx_tokenizer`` match the keyword
    ``partial(...)`` call site later in this file; ``documents`` comes first
    because the function is used with ``Dataset.map(batched=True)``.

    Args:
        documents: Batch dict with parallel ``"title"`` and ``"text"`` lists.
        ctx_encoder: DPR context-encoder model; its return value is expected
            to expose ``.pooler_output``.
        ctx_tokenizer: Matching tokenizer; called on (titles, texts) and
            expected to return PyTorch ``input_ids``.

    Returns:
        ``{"embeddings": ndarray}`` — one pooled embedding per passage.
    """
    # Tokenize title/text pairs; pad to the longest sequence in the batch.
    input_ids = ctx_tokenizer(
        documents["title"], documents["text"], truncation=True, padding="longest", return_tensors="pt"
    )["input_ids"]
    # Run the encoder on GPU when available (the module-level device constant
    # is stored under a rebound obfuscated name, so recompute it locally).
    target_device = "cuda" if torch.cuda.is_available() else "cpu"
    embeddings = ctx_encoder(input_ids.to(device=target_device), return_dict=True).pooler_output
    return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__(rag_example_args, processing_args, index_hnsw_args) -> Optional[Any]:
    """Build a RAG knowledge dataset from a tab-separated csv and FAISS-index it.

    Step 1: load title/text rows, split them into passages, embed with DPR and
    save the dataset to disk. Step 2: build an HNSW inner-product index over the
    embeddings and save it next to the dataset.

    Fix: the original signature repeated one placeholder parameter name three
    times (a SyntaxError) while the body referenced the three argument objects.
    """
    ######################################
    logger.info('Step 1 - Create the dataset')
    ######################################
    # The dataset needed for RAG must have three columns:
    # - title (string): title of the document
    # - text (string): text of a passage of the document
    # - embeddings (array of dimension d): DPR representation of the passage
    # Let's say you have documents in tab-separated csv files with columns "title" and "text"
    assert os.path.isfile(rag_example_args.csv_path), "Please provide a valid path to a csv file"

    # You can load a Dataset object this way
    dataset = load_dataset(
        'csv', data_files=[rag_example_args.csv_path], split='train', delimiter='\t', column_names=['title', 'text'])

    # Then split the documents into passages of 100 words
    # NOTE(review): `split_documents` / `embed` / `device` below refer to the
    # helpers and module constant defined above, whose names were mangled by
    # obfuscation -- confirm the bindings before running.
    dataset = dataset.map(split_documents, batched=True, num_proc=processing_args.num_proc)

    # And compute the embeddings
    ctx_encoder = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name).to(device=device)
    ctx_tokenizer = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name)
    new_features = Features(
        {'text': Value('string'), 'title': Value('string'), 'embeddings': Sequence(Value('float32'))})  # optional, save as float32 instead of float64 to save space
    dataset = dataset.map(
        partial(lowerCAmelCase__, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer) if False else partial(embed, ctx_encoder=ctx_encoder, ctx_tokenizer=ctx_tokenizer),
        batched=True,
        batch_size=processing_args.batch_size,
        features=new_features,
    )

    # And finally save your dataset
    passages_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset')
    dataset.save_to_disk(passages_path)
    # from datasets import load_from_disk
    # dataset = load_from_disk(passages_path)  # to reload the dataset

    ######################################
    logger.info('Step 2 - Index the dataset')
    ######################################
    # Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
    index = faiss.IndexHNSWFlat(index_hnsw_args.d, index_hnsw_args.m, faiss.METRIC_INNER_PRODUCT)
    dataset.add_faiss_index('embeddings', custom_index=index)

    # And save the index
    index_path = os.path.join(rag_example_args.output_dir, 'my_knowledge_dataset_hnsw_index.faiss')
    dataset.get_index('embeddings').save(index_path)
    # dataset.load_faiss_index("embeddings", index_path)  # to reload the index
@dataclass
class A__ :
    """CLI arguments for the RAG knowledge-dataset example: csv input, model
    names and the output directory.

    Fix: the original fields had no type annotations (so `@dataclass` ignored
    them), all shared the name `lowercase` (each overwriting the previous) and
    used the undefined placeholder `__magic_name__` instead of `__file__`/`None`.
    """

    # Tab-separated csv with 'title' and 'text' columns.
    csv_path: str = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
    # Optional question fed to RAG after the index is built.
    question: Optional[str] = field(
        default=None , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
    rag_model_name: str = field(
        default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
    dpr_ctx_encoder_model_name: str = field(
        default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
            'help': (
                'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
                ' \'facebook/dpr-ctx_encoder-multiset-base\''
            )
        } , )
    output_dir: Optional[str] = field(
        default=str(Path(__file__).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
    """Tuning knobs for the passage-splitting and embedding steps.

    Fix: the original fields had no type annotations (so `@dataclass` ignored
    them) and both shared the name `lowercase`; `main` reads `num_proc` and
    `batch_size` from this object.
    """

    # None means single-process `dataset.map`.
    num_proc: Optional[int] = field(
        default=None , metadata={
            'help': 'The number of processes to use to split the documents into passages. Default is single process.'
        } , )
    batch_size: int = field(
        default=16 , metadata={
            'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
        } , )
@dataclass
class A__ :
    """Parameters of the FAISS HNSW index built in step 2.

    Fix: the original fields had no type annotations (so `@dataclass` ignored
    them) and both shared the name `lowercase`; `main` reads `d` and `m` from
    this object when constructing `faiss.IndexHNSWFlat`.
    """

    # Embedding dimensionality passed to IndexHNSWFlat.
    d: int = field(
        default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
    # HNSW neighbor-link count per new element.
    m: int = field(
        default=128 , metadata={
            'help': (
                'The number of bi-directional links created for every new element during the HNSW index construction.'
            )
        } , )
if __name__ == "__main__":
    # Warn-level logging globally, info-level for this script's logger.
    logging.basicConfig(level=logging.WARNING)
    logger.setLevel(logging.INFO)
    # NOTE(review): `RagExampleArguments`, `ProcessingArguments` and
    # `IndexHnswArguments` are the three dataclasses defined above -- their
    # class names were mangled to `A__` by obfuscation; confirm the bindings.
    lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
    # NOTE(review): `parser` presumably refers to the HfArgumentParser created
    # on the previous line (assignment target was mangled) -- confirm.
    lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
    with TemporaryDirectory() as tmp_dir:
        # Default the output directory to a temp dir when none was given.
        lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
        # NOTE(review): `main` is the build-and-index function defined above
        # (name mangled by obfuscation) -- confirm.
        main(rag_example_args, processing_args, index_hnsw_args)
from string import ascii_uppercase
# Map digit values 10..35 (as strings) to the letters 'A'..'Z' used for bases > 10
# (ord('A') - 55 == 10).
lowerCamelCase__ = {str(ord(c) - 55): c for c in ascii_uppercase}
def lowerCAmelCase__(num, base) -> str:
    """Convert a non-negative integer *num* to its string representation in *base*.

    Digit values 10..35 are rendered as the uppercase letters 'A'..'Z'.

    Args:
        num: non-negative integer to convert.
        base: target base, between 2 and 36 inclusive.

    Returns:
        The representation of *num* in *base* as a string.

    Raises:
        TypeError: if *num* or *base* is a float or a str.
        ValueError: if *num* is negative or *base* is outside [2, 36].
    """
    # Fix: the original signature declared the same parameter name twice
    # (a SyntaxError), the body referenced undefined names (`num`, `base`,
    # `ALPHABET_VALUES`), and the loop carried redundant div/mod state.
    if isinstance(num, float):
        raise TypeError('int() can\'t convert non-string with explicit base')
    if isinstance(num, str) or isinstance(base, str):
        raise TypeError('\'str\' object cannot be interpreted as an integer')
    if isinstance(base, float):
        raise TypeError('\'float\' object cannot be interpreted as an integer')
    if num < 0:
        raise ValueError('parameter must be positive int')
    if base in (0, 1):
        raise ValueError('base must be >= 2')
    if base > 36:
        raise ValueError('base must be <= 36')

    digits = []
    while True:
        num, mod = divmod(num, base)
        # chr(mod + 55) maps 10 -> 'A' ... 35 -> 'Z'.
        digits.append(chr(mod + 55) if base >= 11 and 9 < mod < 36 else str(mod))
        if num == 0:
            return ''.join(reversed(digits))
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    # Exhaustive round-trip check: converting to every base 2..36 and parsing
    # back with int(value, base) must reproduce the original number.
    # NOTE(review): `decimal_to_any` is the converter defined above, whose def
    # name was mangled by obfuscation -- confirm the binding.
    for base in range(2, 37):
        for num in range(1000):
            assert int(decimal_to_any(num, base), base) == num, (
                num,
                base,
                decimal_to_any(num, base),
                int(decimal_to_any(num, base), base),
            )
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( SchedulerCommonTest ):
    """Tests for DDPMParallelScheduler: config sweeps, variance table values,
    batched no-noise stepping, full denoising loops, and custom-timestep
    validation.

    Fix: the original class inherited the undefined placeholder
    `__magic_name__` (the file imports SchedulerCommonTest for this purpose),
    every method shared the name `_lowerCamelCase` (shadowing each other),
    and local assignment targets were mangled so later references failed.
    """

    scheduler_classes = (DDPMParallelScheduler,)

    def get_scheduler_config(self, **kwargs):
        """Return the default scheduler config, with any keyword overrides applied."""
        config = {
            'num_train_timesteps': 1_000,
            'beta_start': 0.0_0_0_1,
            'beta_end': 0.0_2,
            'beta_schedule': 'linear',
            'variance_type': 'fixed_small',
            'clip_sample': True,
        }
        config.update(**kwargs)
        return config

    def test_timesteps(self):
        for timesteps in [1, 5, 100, 1_000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_betas(self):
        for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1], [0.0_0_2, 0.0_2, 0.2, 2]):
            self.check_over_configs(beta_start=beta_start, beta_end=beta_end)

    def test_schedules(self):
        for schedule in ["linear", "squaredcos_cap_v2"]:
            self.check_over_configs(beta_schedule=schedule)

    def test_variance_type(self):
        for variance in ["fixed_small", "fixed_large", "other"]:
            self.check_over_configs(variance_type=variance)

    def test_clip_sample(self):
        for clip_sample in [True, False]:
            self.check_over_configs(clip_sample=clip_sample)

    def test_thresholding(self):
        self.check_over_configs(thresholding=False)
        for threshold in [0.5, 1.0, 2.0]:
            for prediction_type in ["epsilon", "sample", "v_prediction"]:
                self.check_over_configs(
                    thresholding=True, prediction_type=prediction_type, sample_max_value=threshold, )

    def test_prediction_type(self):
        for prediction_type in ["epsilon", "sample", "v_prediction"]:
            self.check_over_configs(prediction_type=prediction_type)

    def test_time_indices(self):
        for t in [0, 500, 999]:
            self.check_over_forward(time_step=t)

    def test_variance(self):
        """Check reference variance values for the 'fixed_small' schedule."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        assert torch.sum(torch.abs(scheduler._get_variance(0) - 0.0)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(487) - 0.0_0_9_7_9)) < 1E-5
        assert torch.sum(torch.abs(scheduler._get_variance(999) - 0.0_2)) < 1E-5

    def test_batch_step_no_noise(self):
        """Step three perturbed samples at once and compare against reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample_a = self.dummy_sample_deter
        sample_b = self.dummy_sample_deter + 0.1
        sample_c = self.dummy_sample_deter - 0.1

        per_sample_batch = sample_a.shape[0]
        samples = torch.stack([sample_a, sample_b, sample_c], dim=0)
        timesteps = torch.arange(num_trained_timesteps)[0:3, None].repeat(1, per_sample_batch)

        residual = model(samples.flatten(0, 1), timesteps.flatten(0, 1))
        pred_prev_sample = scheduler.batch_step_no_noise(residual, timesteps.flatten(0, 1), samples.flatten(0, 1))

        result_sum = torch.sum(torch.abs(pred_prev_sample))
        result_mean = torch.mean(torch.abs(pred_prev_sample))

        assert abs(result_sum.item() - 1_1_5_3.1_8_3_3) < 1E-2
        assert abs(result_mean.item() - 0.5_0_0_5) < 1E-3

    def test_full_loop_no_noise(self):
        """Run the full reverse-diffusion loop and compare against reference sums."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_5_8.9_6_0_6) < 1E-2
        assert abs(result_mean.item() - 0.3_3_7_2) < 1E-3

    def test_full_loop_with_v_prediction(self):
        """Same as the full loop, but with the 'v_prediction' objective."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config(prediction_type='v_prediction')
        scheduler = scheduler_class(**scheduler_config)
        num_trained_timesteps = len(scheduler)

        model = self.dummy_model()
        sample = self.dummy_sample_deter
        generator = torch.manual_seed(0)

        for t in reversed(range(num_trained_timesteps)):
            # 1. predict noise residual
            residual = model(sample, t)
            # 2. predict previous mean of sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample
            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 2_0_2.0_2_9_6) < 1E-2
        assert abs(result_mean.item() - 0.2_6_3_1) < 1E-3

    def test_custom_timesteps(self):
        """previous_timestep must return the next entry of a custom schedule (or -1 at the end)."""
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        scheduler_timesteps = scheduler.timesteps

        for i, timestep in enumerate(scheduler_timesteps):
            if i == len(timesteps) - 1:
                expected_prev_t = -1
            else:
                expected_prev_t = timesteps[i + 1]
            prev_t = scheduler.previous_timestep(timestep)
            prev_t = prev_t.item()
            self.assertEqual(prev_t, expected_prev_t)

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        # 51 after 50 breaks the required descending order.
        timesteps = [100, 87, 50, 51, 0]
        with self.assertRaises(ValueError, msg='`custom_timesteps` must be in descending order.'):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [100, 87, 50, 1, 0]
        num_inference_steps = len(timesteps)
        with self.assertRaises(ValueError, msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.'):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]
        with self.assertRaises(
            ValueError, msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}', ):
            scheduler.set_timesteps(timesteps=timesteps)
import json
import os
import unittest
from transformers.models.gptsan_japanese.tokenization_gptsan_japanese import (
VOCAB_FILES_NAMES,
GPTSanJapaneseTokenizer,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class A__ ( TokenizerTesterMixin , unittest.TestCase ):
    """Tokenization tests for GPTSanJapaneseTokenizer: local toy vocab plus
    slow tests against the 'Tanrei/GPTSAN-japanese' checkpoint.

    Fix: the original class inherited the undefined placeholder
    `__magic_name__` (the file imports TokenizerTesterMixin for this purpose),
    every method shared the name `_lowerCamelCase` (shadowing each other),
    and local assignment targets were mangled so later references failed.
    """

    tokenizer_class = GPTSanJapaneseTokenizer
    test_rust_tokenizer = False
    from_pretrained_kwargs = {'do_clean_text': False, 'add_prefix_space': False}

    def setUp(self):
        """Write a tiny vocab and emoji table into the test temp directory."""
        super().setUp()
        # fmt: off
        vocab_tokens = ['こん', 'こんに', 'にちは', 'ばんは', '世界,㔺界', '、', '。', '<BR>', '<SP>', '<TAB>', '<URL>', '<EMAIL>', '<TEL>', '<DATE>', '<PRICE>', '<BLOCK>', '<KIGOU>', '<U2000U2BFF>', '<|emoji1|>', '<unk>', '<|bagoftoken|>', '<|endoftext|>']
        # fmt: on
        emoji_tokens = {'emoji': {'\ud83d\ude00': '<|emoji1|>'}, 'emoji_inv': {'<|emoji1|>': '\ud83d\ude00'}}  # 😀
        self.special_tokens_map = {'unk_token': '<unk>'}

        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['vocab_file'])
        self.emoji_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES['emoji_file'])
        with open(self.vocab_file, 'w', encoding='utf-8') as vocab_writer:
            vocab_writer.write(''.join([x + '\n' for x in vocab_tokens]))
        with open(self.emoji_file, 'w') as emoji_writer:
            emoji_writer.write(json.dumps(emoji_tokens))

    def get_tokenizer(self, **kwargs):
        """Build a tokenizer from the toy files written in setUp."""
        kwargs.update(self.special_tokens_map)
        return GPTSanJapaneseTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        # '㔺' is a variant character normalized to '世' on decode.
        input_text = 'こんにちは、世界。 \nこんばんは、㔺界。😀'
        output_text = 'こんにちは、世界。 \nこんばんは、世界。😀'
        return input_text, output_text

    def get_clean_sequence(self, tokenizer):
        input_text, output_text = self.get_input_output_texts(tokenizer)
        ids = tokenizer.encode(output_text, add_special_tokens=False)
        text = tokenizer.decode(ids, clean_up_tokenization_spaces=False)
        return text, ids

    def test_pretokenized_inputs(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_pair_input(self):
        pass  # TODO add if relevant

    def test_maximum_encoding_length_single_input(self):
        pass  # TODO add if relevant

    def test_full_tokenizer(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization
        input_text = 'こんにちは、世界。 こんばんは、㔺界。'
        expected_token = ['こん', 'にちは', '、', '世界', '。', '<SP>', 'こん', 'ばんは', '、', '㔺界', '。']
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, expected_token)

        # Testing conversion to ids without special tokens
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6]
        input_ids = tokenizer.convert_tokens_to_ids(tokens)
        self.assertListEqual(input_ids, expected_ids)

        # Testing conversion to ids with special tokens
        input_tokens = tokens + [tokenizer.unk_token]
        expected_ids = [0, 2, 5, 4, 6, 8, 0, 3, 5, 4, 6, 19]
        input_ids = tokenizer.convert_tokens_to_ids(input_tokens)
        self.assertListEqual(input_ids, expected_ids)

    def test_token_bagging(self):
        tokenizer = self.get_tokenizer()
        # Testing tokenization: <|bagoftoken|> expands to repeated preceding token.
        input_text = 'こんにちは、<|bagoftoken|>世界。こんばんは、<|bagoftoken|>㔺界。'
        expected_text = 'こんにちは、、、、世界。こんばんは、、、、世界。'
        tokens = tokenizer.encode(input_text)
        output_text = tokenizer.decode(tokens)
        self.assertEqual(output_text, expected_text)

    @slow
    def test_prefix_input_token_ids(self):
        """Encoding with/without `prefix_text` must decode to the same text."""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        expected_text = 'こんにちは、世界。こんばんは、世界。😀'
        ids_concat = tokenizer.encode(prefix_text + input_text)
        ids_prefix_only = tokenizer.encode('', prefix_text=prefix_text + input_text)
        ids_split = tokenizer.encode(input_text, prefix_text=prefix_text)
        text_concat = tokenizer.decode(ids_concat)
        text_prefix_only = tokenizer.decode(ids_prefix_only)
        text_split = tokenizer.decode(ids_split)
        self.assertEqual(text_concat, expected_text)
        self.assertEqual(text_prefix_only, expected_text)
        self.assertEqual(text_split, expected_text)

    @slow
    def test_token_type_ids(self):
        """token_type_ids must mark the prefix segment for each encoding style."""
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        prefix_text = 'こんにちは、世界。'
        input_text = 'こんばんは、㔺界。😀'
        len_prefix = len(tokenizer.encode(prefix_text)) - 2
        len_text = len(tokenizer.encode(input_text)) - 2
        expected_mask_concat = [1] + [0] * (len_prefix + len_text + 1)
        expected_mask_prefix_only = [1] * (len_prefix + len_text + 1) + [0]
        expected_mask_split = [1] + [1] * (len_prefix) + [0] * (len_text + 1)
        type_ids_concat = tokenizer(prefix_text + input_text).token_type_ids
        type_ids_prefix_only = tokenizer('', prefix_text=prefix_text + input_text).token_type_ids
        type_ids_split = tokenizer(input_text, prefix_text=prefix_text).token_type_ids
        self.assertListEqual(type_ids_concat, expected_mask_concat)
        self.assertListEqual(type_ids_prefix_only, expected_mask_prefix_only)
        self.assertListEqual(type_ids_split, expected_mask_split)

    @slow
    def test_prefix_tokens(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        x_token_a = tokenizer.encode('あンいワ')
        x_token_b = tokenizer.encode('', prefix_text='あンいワ')
        x_token_c = tokenizer.encode('いワ', prefix_text='あン')
        # All variants decode identically but tokenize differently.
        self.assertEqual(tokenizer.decode(x_token_a), tokenizer.decode(x_token_b))
        self.assertEqual(tokenizer.decode(x_token_a), tokenizer.decode(x_token_c))
        self.assertNotEqual(x_token_a, x_token_b)
        self.assertNotEqual(x_token_a, x_token_c)
        self.assertEqual(x_token_a[1], x_token_b[-1])  # SEG token
        self.assertEqual(x_token_a[1], x_token_c[3])  # SEG token

    @slow
    def test_batch_encode(self):
        tokenizer = self.tokenizer_class.from_pretrained('Tanrei/GPTSAN-japanese')
        input_pairs = [['武田信玄', 'は、'], ['織田信長', 'の配下の、']]
        x_token = tokenizer(input_pairs, padding=True)
        x_token_a = tokenizer.batch_encode_plus(input_pairs, padding=True)

        # fmt: off
        expected_outputs = [[35_993, 8_640, 25_948, 35_998, 30_647, 35_675, 35_999, 35_999], [35_993, 10_382, 9_868, 35_998, 30_646, 9_459, 30_646, 35_675]]
        expected_typeids = [[1, 1, 1, 0, 0, 0, 0, 0], [1, 1, 1, 0, 0, 0, 0, 0]]
        expected_attmask = [[1, 1, 1, 1, 1, 1, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1]]
        # fmt: on
        self.assertListEqual(x_token.input_ids, expected_outputs)
        self.assertListEqual(x_token.token_type_ids, expected_typeids)
        self.assertListEqual(x_token.attention_mask, expected_attmask)
        self.assertListEqual(x_token_a.input_ids, expected_outputs)
        self.assertListEqual(x_token_a.token_type_ids, expected_typeids)
        self.assertListEqual(x_token_a.attention_mask, expected_attmask)

    def test_conversion_reversible(self):
        # Intentionally convert some words to accommodate character fluctuations unique to Japanese
        pass

    def test_padding_different_model_input_name(self):
        # tokenizer has no padding token
        pass
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( ProcessorMixin ):
    """Processor that couples a LayoutLMv3 image processor with a LayoutLMv3
    tokenizer: images are OCR'd (optionally) and the resulting words/boxes are
    tokenized together with any user-provided text.

    Fix: the original class inherited the undefined placeholder
    `__magic_name__` (the file imports ProcessorMixin for this purpose), class
    attributes were all named `lowercase`, and local assignment targets were
    mangled so later references failed.
    """

    attributes = ['image_processor', 'tokenizer']
    image_processor_class = 'LayoutLMv3ImageProcessor'
    tokenizer_class = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
                ' instead.', FutureWarning, )
            feature_extractor = kwargs.pop('feature_extractor')

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError('You need to specify an `image_processor`.')
        if tokenizer is None:
            raise ValueError('You need to specify a `tokenizer`.')

        super().__init__(image_processor, tokenizer)

    def __call__(
        self,
        images,
        text: Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None,
        text_pair: Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None,
        boxes: Union[List[List[int]], List[List[List[int]]]] = None,
        word_labels: Optional[Union[List[int], List[List[int]]]] = None,
        add_special_tokens: bool = True,
        padding: Union[bool, str, PaddingStrategy] = False,
        truncation: Union[bool, str, TruncationStrategy] = None,
        max_length: Optional[int] = None,
        stride: int = 0,
        pad_to_multiple_of: Optional[int] = None,
        return_token_type_ids: Optional[bool] = None,
        return_attention_mask: Optional[bool] = None,
        return_overflowing_tokens: bool = False,
        return_special_tokens_mask: bool = False,
        return_offsets_mapping: bool = False,
        return_length: bool = False,
        verbose: bool = True,
        return_tensors: Optional[Union[str, TensorType]] = None,
        **kwargs,
    ):
        """Run the image processor, then tokenize with the OCR'd (or supplied)
        words and boxes; attaches `pixel_values` to the encoding."""
        # User-supplied boxes/labels clash with built-in OCR output.
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.')

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.')

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features['words']

        encoded_inputs = self.tokenizer(
            text=text if text is not None else features['words'],
            text_pair=text_pair if text_pair is not None else None,
            boxes=boxes if boxes is not None else features['boxes'],
            word_labels=word_labels,
            add_special_tokens=add_special_tokens,
            padding=padding,
            truncation=truncation,
            max_length=max_length,
            stride=stride,
            pad_to_multiple_of=pad_to_multiple_of,
            return_token_type_ids=return_token_type_ids,
            return_attention_mask=return_attention_mask,
            return_overflowing_tokens=return_overflowing_tokens,
            return_special_tokens_mask=return_special_tokens_mask,
            return_offsets_mapping=return_offsets_mapping,
            return_length=return_length,
            verbose=verbose,
            return_tensors=return_tensors,
            **kwargs,
        )

        # add pixel values
        images = features.pop('pixel_values')
        if return_overflowing_tokens is True:
            images = self.get_overflowing_images(images, encoded_inputs['overflow_to_sample_mapping'])
        encoded_inputs['pixel_values'] = images

        return encoded_inputs

    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        """Repeat each image once per overflowed chunk so images stay aligned
        with the tokenizer's overflow output."""
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
                f''' {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}''')

        return images_with_overflow

    def batch_decode(self, *args, **kwargs):
        """Forward to the tokenizer's batch_decode."""
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        """Forward to the tokenizer's decode."""
        return self.tokenizer.decode(*args, **kwargs)

    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            '`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.', FutureWarning, )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            '`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.', FutureWarning, )
        return self.image_processor
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
# Module logger (transformers-style).
lowerCamelCase__ = logging.get_logger(__name__)
# NOTE(review): this rebinds the same name as the logger above (obfuscation
# artifact); presumably this was a separate pretrained-config archive map
# constant -- confirm before relying on either binding.
lowerCamelCase__ = {
    """google/mobilenet_v2_1.4_224""": """https://huggingface.co/google/mobilenet_v2_1.4_224/resolve/main/config.json""",
    """google/mobilenet_v2_1.0_224""": """https://huggingface.co/google/mobilenet_v2_1.0_224/resolve/main/config.json""",
    """google/mobilenet_v2_0.75_160""": """https://huggingface.co/google/mobilenet_v2_0.75_160/resolve/main/config.json""",
    """google/mobilenet_v2_0.35_96""": """https://huggingface.co/google/mobilenet_v2_0.35_96/resolve/main/config.json""",
    # See all MobileNetV2 models at https://huggingface.co/models?filter=mobilenet_v2
}
class A__ ( PretrainedConfig ):
    """Configuration for MobileNetV2 models (architecture hyper-parameters).

    Fix: the original class inherited the undefined placeholder
    `__magic_name__` (the file imports PretrainedConfig for this purpose) and
    its class attribute was named `lowercase`; by transformers convention the
    config type tag is `model_type`.
    """

    model_type = 'mobilenet_v2'

    def __init__(
        self,
        num_channels=3,
        image_size=224,
        depth_multiplier=1.0,
        depth_divisible_by=8,
        min_depth=8,
        expand_ratio=6,
        output_stride=32,
        first_layer_is_expansion=True,
        finegrained_output=True,
        hidden_act="relu6",
        tf_padding=True,
        classifier_dropout_prob=0.8,
        initializer_range=0.0_2,
        layer_norm_eps=0.0_0_1,
        semantic_loss_ignore_index=255,
        **kwargs,
    ):
        super().__init__(**kwargs)

        # A non-positive multiplier would produce zero-width layers.
        if depth_multiplier <= 0:
            raise ValueError('depth_multiplier must be greater than zero.')

        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.tf_padding = tf_padding
        self.classifier_dropout_prob = classifier_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.semantic_loss_ignore_index = semantic_loss_ignore_index
class A__ ( OnnxConfig ):
    """ONNX export configuration for MobileNetV2.

    Fix: the original class inherited the undefined placeholder
    `__magic_name__` (the file imports OnnxConfig for this purpose) and all
    three members shared mangled names; by transformers convention these are
    the `inputs`/`outputs`/`atol_for_validation` properties.
    """

    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self):
        # Only the batch axis is dynamic.
        return OrderedDict([('pixel_values', {0: 'batch'})])

    @property
    def outputs(self):
        if self.task == "image-classification":
            return OrderedDict([('logits', {0: 'batch'})])
        else:
            return OrderedDict([('last_hidden_state', {0: 'batch'}), ('pooler_output', {0: 'batch'})])

    @property
    def atol_for_validation(self):
        # Absolute tolerance used when validating the exported model.
        return 1E-4
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __magic_name__ ):
def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Dict = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Any = q_groups
lowerCAmelCase__ : Any = k_groups
lowerCAmelCase__ : Union[str, Any] = v_groups
lowerCAmelCase__ : int = post_attention_groups
lowerCAmelCase__ : str = intermediate_groups
lowerCAmelCase__ : Union[str, Any] = output_groups
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Any , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowerCAmelCase__ : Any = model(a )[0]
lowerCAmelCase__ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(a , a , atol=1E-4 ) ) | 69 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
lowerCamelCase__ = {
"""configuration_rag""": ["""RagConfig"""],
"""retrieval_rag""": ["""RagRetriever"""],
"""tokenization_rag""": ["""RagTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""RagModel""",
"""RagPreTrainedModel""",
"""RagSequenceForGeneration""",
"""RagTokenForGeneration""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""TFRagModel""",
"""TFRagPreTrainedModel""",
"""TFRagSequenceForGeneration""",
"""TFRagTokenForGeneration""",
]
if TYPE_CHECKING:
from .configuration_rag import RagConfig
from .retrieval_rag import RagRetriever
from .tokenization_rag import RagTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_rag import RagModel, RagPreTrainedModel, RagSequenceForGeneration, RagTokenForGeneration
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_rag import (
TFRagModel,
TFRagPreTrainedModel,
TFRagSequenceForGeneration,
TFRagTokenForGeneration,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 |
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCAmelCase__ : Stack[int] = Stack()
lowerCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE_ )
elif i == ")":
# RULE 4
lowerCAmelCase__ : List[Any] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : List[Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
operand_stack.push(SCREAMING_SNAKE_CASE_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 69 | 1 |
import numpy as np
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> np.array:
return (2 / (1 + np.exp(-2 * vector ))) - 1
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
import numpy
class A__ :
def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : Dict = input_arr
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 69 | 1 |
lowerCamelCase__ = range(2, 20 + 1)
lowerCamelCase__ = [10**k for k in range(ks[-1] + 1)]
lowerCamelCase__ = {}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> List[Any]:
lowerCAmelCase__ : Tuple = sum(a_i[j] for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) )
lowerCAmelCase__ : List[Any] = sum(a_i[j] * base[j] for j in range(min(len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ ) ) )
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = 0, 0
lowerCAmelCase__ : List[Any] = n - i
lowerCAmelCase__ : str = memo.get(SCREAMING_SNAKE_CASE_ )
if sub_memo is not None:
lowerCAmelCase__ : str = sub_memo.get(SCREAMING_SNAKE_CASE_ )
if jumps is not None and len(SCREAMING_SNAKE_CASE_ ) > 0:
# find and make the largest jump without going over
lowerCAmelCase__ : Any = -1
for _k in range(len(SCREAMING_SNAKE_CASE_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
lowerCAmelCase__ : str = _k
break
if max_jump >= 0:
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = jumps[max_jump]
# since the difference between jumps is cached, add c
lowerCAmelCase__ : Optional[Any] = diff + c
for j in range(min(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ) ):
lowerCAmelCase__ , lowerCAmelCase__ : int = divmod(SCREAMING_SNAKE_CASE_ , 10 )
if new_c > 0:
add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
else:
lowerCAmelCase__ : Dict = []
else:
lowerCAmelCase__ : Any = {c: []}
lowerCAmelCase__ : int = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = next_term(SCREAMING_SNAKE_CASE_ , k - 1 , i + dn , SCREAMING_SNAKE_CASE_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
lowerCAmelCase__ , lowerCAmelCase__ : Tuple = compute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , i + dn , SCREAMING_SNAKE_CASE_ )
diff += _diff
dn += terms_jumped
lowerCAmelCase__ : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
lowerCAmelCase__ : List[str] = 0
while j < len(SCREAMING_SNAKE_CASE_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(SCREAMING_SNAKE_CASE_ , (diff, dn, k) )
return (diff, dn)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Any:
if i >= n:
return 0, i
if k > len(SCREAMING_SNAKE_CASE_ ):
a_i.extend([0 for _ in range(k - len(SCREAMING_SNAKE_CASE_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
lowerCAmelCase__ : Tuple = i
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = 0, 0, 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
lowerCAmelCase__ : Optional[int] = ds_c + ds_b
diff += addend
lowerCAmelCase__ : Any = 0
for j in range(SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : Dict = a_i[j] + addend
lowerCAmelCase__ , lowerCAmelCase__ : int = divmod(SCREAMING_SNAKE_CASE_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
return diff, i - start_i
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[int]:
for j in range(SCREAMING_SNAKE_CASE_ , len(SCREAMING_SNAKE_CASE_ ) ):
lowerCAmelCase__ : Optional[Any] = digits[j] + addend
if s >= 10:
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = divmod(SCREAMING_SNAKE_CASE_ , 10 )
lowerCAmelCase__ : str = addend // 10 + quotient
else:
lowerCAmelCase__ : int = s
lowerCAmelCase__ : int = addend // 10
if addend == 0:
break
while addend > 0:
lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = divmod(SCREAMING_SNAKE_CASE_ , 10 )
digits.append(SCREAMING_SNAKE_CASE_ )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10**15 ) -> int:
lowerCAmelCase__ : str = [1]
lowerCAmelCase__ : str = 1
lowerCAmelCase__ : Dict = 0
while True:
lowerCAmelCase__ , lowerCAmelCase__ : List[str] = next_term(SCREAMING_SNAKE_CASE_ , 20 , i + dn , SCREAMING_SNAKE_CASE_ )
dn += terms_jumped
if dn == n - i:
break
lowerCAmelCase__ : Optional[Any] = 0
for j in range(len(SCREAMING_SNAKE_CASE_ ) ):
a_n += digits[j] * 10**j
return a_n
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class SwinvaModelTester:
    """Builds a tiny SwinV2 config plus random inputs and runs shape checks for each model head.

    The class name must stay ``SwinvaModelTester`` — the test case below instantiates it by
    that name in ``setUp``.
    """

    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        patch_size=2,
        num_channels=3,
        embed_dim=16,
        depths=[1, 2, 1],
        num_heads=[2, 2, 4],
        window_size=2,
        mlp_ratio=2.0,
        qkv_bias=True,
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        drop_path_rate=0.1,
        hidden_act="gelu",
        use_absolute_embeddings=False,
        patch_norm=True,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        is_training=True,
        scope=None,
        use_labels=True,
        type_sequence_label_size=10,
        encoder_stride=8,
    ):
        # `parent` is the unittest.TestCase, used for its assert helpers.
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride

    def prepare_config_and_inputs(self):
        """Return (config, pixel_values, labels); labels is None unless `use_labels`."""
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        """Build a SwinvaConfig from the tester's hyper-parameters."""
        # NOTE: the keyword was previously misspelled `path_norm`, which SwinvaConfig
        # silently ignored; `patch_norm` is the actual config attribute.
        return SwinvaConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            embed_dim=self.embed_dim,
            depths=self.depths,
            num_heads=self.num_heads,
            window_size=self.window_size,
            mlp_ratio=self.mlp_ratio,
            qkv_bias=self.qkv_bias,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            drop_path_rate=self.drop_path_rate,
            hidden_act=self.hidden_act,
            use_absolute_embeddings=self.use_absolute_embeddings,
            patch_norm=self.patch_norm,
            layer_norm_eps=self.layer_norm_eps,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        """Check the base model's last_hidden_state shape after patch merging."""
        model = SwinvaModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # Sequence length shrinks 4x per merge stage; hidden dim doubles per stage.
        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        """Check the masked-image-modeling head reconstructs an image-shaped logit map."""
        model = SwinvaForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = SwinvaForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        """Check the classification head emits one logit per label."""
        config.num_labels = self.type_sequence_label_size
        model = SwinvaForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        """Return (config, inputs_dict) in the shape ModelTesterMixin expects."""
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class SwinvaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """Unit tests for the SwinV2 model family, driven by SwinvaModelTester.

    Method names follow the ``test_*`` convention so unittest discovery and the
    ModelTesterMixin overrides actually run them.
    """

    all_model_classes = (
        (SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
    )
    pipeline_model_mapping = (
        {'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = SwinvaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=SwinvaConfig, embed_dim=37)

    def test_config(self):
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    @unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.')
    def test_multi_gpu_data_parallel_forward(self):
        pass

    @unittest.skip(reason='Swinv2 does not use inputs_embeds')
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = False
            config.return_dict = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            # One attention tensor per stage.
            expected_num_attentions = len(self.model_tester.depths)
            self.assertEqual(len(attentions), expected_num_attentions)

            # check that output_attentions also work using config
            del inputs_dict['output_attentions']
            config.output_attentions = True
            window_size_squared = config.window_size**2
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))
            attentions = outputs.attentions
            self.assertEqual(len(attentions), expected_num_attentions)

            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )
            out_len = len(outputs)

            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            inputs_dict['output_hidden_states'] = True
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            if hasattr(self.model_tester, 'num_hidden_states_types'):
                added_hidden_states = self.model_tester.num_hidden_states_types
            else:
                # also another +1 for reshaped_hidden_states
                added_hidden_states = 2
            self.assertEqual(out_len + added_hidden_states, len(outputs))

            self_attentions = outputs.attentions

            self.assertEqual(len(self_attentions), expected_num_attentions)

            self.assertListEqual(
                list(self_attentions[0].shape[-3:]),
                [self.model_tester.num_heads[0], window_size_squared, window_size_squared],
            )

    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        """Shared checker: verify hidden_states and reshaped_hidden_states shapes."""
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(
            self.model_tester, 'expected_num_hidden_layers', len(self.model_tester.depths) + 1
        )
        self.assertEqual(len(hidden_states), expected_num_layers)

        # Swinv2 has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        # Flatten the spatial dims back to a sequence for comparison with hidden_states.
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True

            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        # Force a patch size that does not divide the image so padding kicks in.
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict['output_hidden_states']
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwinvaModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f'''Parameter {name} of model {model_class} seems not properly initialized''',
                    )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a )
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) | 69 | 1 |
import warnings
from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
def __init__( self : List[Any] , *a : str , **a : Union[str, Any] ):
'''simple docstring'''
warnings.warn(
'The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers.'
' Please use MobileViTImageProcessor instead.' , a , )
super().__init__(*a , **a ) | 69 |
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase__ : str = [7, 11, 13, 17]
for i, test in enumerate(SCREAMING_SNAKE_CASE_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
return sum(
int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
for num in permutations(range(SCREAMING_SNAKE_CASE_ ) )
if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 | 1 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : str , **a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter
lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1
lowerCAmelCase__ : Tuple = samplea.shape[0]
lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a )
lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCAmelCase__ : str = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Dict = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Any = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : Optional[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : List[str] = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : List[str] = self.dummy_sample_deter
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : List[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : str = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a )
lowerCAmelCase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(a ):
if i == len(a ) - 1:
lowerCAmelCase__ : Tuple = -1
else:
lowerCAmelCase__ : Dict = timesteps[i + 1]
lowerCAmelCase__ : str = scheduler.previous_timestep(a )
lowerCAmelCase__ : int = prev_t.item()
self.assertEqual(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 1, 0]
lowerCAmelCase__ : int = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a ) | 69 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__ ( __magic_name__ , unittest.TestCase ):
lowercase = ConsistencyModelPipeline
lowercase = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
lowercase = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
# Override required_optional_params to remove num_images_per_prompt
lowercase = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'output_type',
'return_dict',
'callback',
'callback_steps',
] )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Dict = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet' , )
return unet
@property
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = UNetaDModel.from_pretrained(
'diffusers/consistency-models-test' , subfolder='test_unet_class_cond' , )
return unet
def _lowerCamelCase ( self : Optional[Any] , a : Union[str, Any]=False ):
'''simple docstring'''
if class_cond:
lowerCAmelCase__ : Tuple = self.dummy_cond_unet
else:
lowerCAmelCase__ : Dict = self.dummy_uncond_unet
# Default to CM multistep sampler
lowerCAmelCase__ : Optional[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = {
'unet': unet,
'scheduler': scheduler,
}
return components
def _lowerCamelCase ( self : int , a : Optional[int] , a : Any=0 ):
'''simple docstring'''
if str(a ).startswith('mps' ):
lowerCAmelCase__ : List[str] = torch.manual_seed(a )
else:
lowerCAmelCase__ : str = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : str = {
'batch_size': 1,
'num_inference_steps': None,
'timesteps': [22, 0],
'generator': generator,
'output_type': 'np',
}
return inputs
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[Any] = self.get_dummy_components()
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_dummy_inputs(a )
lowerCAmelCase__ : str = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Tuple = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Tuple = self.get_dummy_components(class_cond=a )
lowerCAmelCase__ : Union[str, Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Tuple = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[Any] = self.get_dummy_inputs(a )
lowerCAmelCase__ : int = 0
lowerCAmelCase__ : Union[str, Any] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.3_5_7_2, 0.6_2_7_3, 0.4_0_3_1, 0.3_9_6_1, 0.4_3_2_1, 0.5_7_3_0, 0.5_2_6_6, 0.4_7_8_0, 0.5_0_0_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_components()
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Dict = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Union[str, Any] = self.get_dummy_inputs(a )
lowerCAmelCase__ : Optional[Any] = 1
lowerCAmelCase__ : Dict = None
lowerCAmelCase__ : List[Any] = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Tuple = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[Any] = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = 'cpu' # ensure determinism for the device-dependent torch.Generator
lowerCAmelCase__ : Optional[int] = self.get_dummy_components(class_cond=a )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(**a )
lowerCAmelCase__ : Optional[Any] = pipe.to(a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Tuple = self.get_dummy_inputs(a )
lowerCAmelCase__ : Dict = 1
lowerCAmelCase__ : Tuple = None
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : str = pipe(**a ).images
assert image.shape == (1, 32, 32, 3)
lowerCAmelCase__ : Union[str, Any] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Dict = np.array([0.5_0_0_4, 0.5_0_0_4, 0.4_9_9_4, 0.5_0_0_8, 0.4_9_7_6, 0.5_0_1_8, 0.4_9_9_0, 0.4_9_8_2, 0.4_9_8_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
lowerCAmelCase__ : List[Any] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
lowerCAmelCase__ : Tuple = latents
return inputs
def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
'''simple docstring'''
if type(a ) == str:
lowerCAmelCase__ : str = torch.device(a )
lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[Any] = self.get_inputs()
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_inputs()
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 69 | 1 |
from typing import List, Optional, Tuple
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_herbert import HerbertTokenizer
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt""", """tokenizer_file""": """tokenizer.json"""}
lowerCamelCase__ = {
"""vocab_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/vocab.json"""
},
"""merges_file""": {
"""allegro/herbert-base-cased""": """https://huggingface.co/allegro/herbert-base-cased/resolve/main/merges.txt"""
},
}
lowerCamelCase__ = {"""allegro/herbert-base-cased""": 514}
lowerCamelCase__ = {}
class A__ ( __magic_name__ ):
lowercase = VOCAB_FILES_NAMES
lowercase = PRETRAINED_VOCAB_FILES_MAP
lowercase = PRETRAINED_INIT_CONFIGURATION
lowercase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowercase = HerbertTokenizer
def __init__( self : int , a : Dict=None , a : str=None , a : str=None , a : List[Any]="<s>" , a : Union[str, Any]="<unk>" , a : Union[str, Any]="<pad>" , a : Union[str, Any]="<mask>" , a : List[str]="</s>" , **a : List[Any] , ):
'''simple docstring'''
super().__init__(
a , a , tokenizer_file=a , cls_token=a , unk_token=a , pad_token=a , mask_token=a , sep_token=a , **a , )
def _lowerCamelCase ( self : List[Any] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Dict = [self.cls_token_id]
lowerCAmelCase__ : Any = [self.sep_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowerCamelCase ( self : Optional[Any] , a : List[int] , a : Optional[List[int]] = None , a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=a , token_ids_a=a , already_has_special_tokens=a )
if token_ids_a is None:
return [1] + ([0] * len(a )) + [1]
return [1] + ([0] * len(a )) + [1] + ([0] * len(a )) + [1]
def _lowerCamelCase ( self : List[str] , a : List[int] , a : Optional[List[int]] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = [self.sep_token_id]
lowerCAmelCase__ : Optional[int] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowerCamelCase ( self : Optional[Any] , a : str , a : Optional[str] = None ):
'''simple docstring'''
lowerCAmelCase__ : int = self._tokenizer.model.save(a , name=a )
return tuple(a ) | 69 |
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __magic_name__ ):
def __init__( self : int , a : List[str] , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = params
lowerCAmelCase__ : Union[str, Any] = np.array(a )
lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , a : List[str] ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.lengths )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size
lowerCAmelCase__ : Optional[int] = self.lengths > max_len
logger.info(f'''Splitting {sum(a )} too long sequences.''' )
def divide_chunks(a : List[str] , a : Tuple ):
return [l[i : i + n] for i in range(0 , len(a ) , a )]
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Union[str, Any] = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ : Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ : Dict = np.insert(a , 0 , a )
if sub_s[-1] != sep_id:
lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a )
assert len(a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a )
new_tok_ids.extend(a )
new_lengths.extend([len(a ) for l in sub_seqs] )
lowerCAmelCase__ : str = np.array(a )
lowerCAmelCase__ : Optional[Any] = np.array(a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = len(self )
lowerCAmelCase__ : List[Any] = self.lengths > 11
lowerCAmelCase__ : Dict = self.token_ids[indices]
lowerCAmelCase__ : Tuple = self.lengths[indices]
lowerCAmelCase__ : Any = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : str = len(self )
lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ : List[str] = self.token_ids[indices]
lowerCAmelCase__ : Optional[Any] = self.lengths[indices]
lowerCAmelCase__ : Union[str, Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self : int , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [t[0] for t in batch]
lowerCAmelCase__ : List[str] = [t[1] for t in batch]
assert len(a ) == len(a )
# Max for paddings
lowerCAmelCase__ : List[str] = max(a )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
assert len(tk_ ) == len(a )
assert all(len(a ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs)
return tk_t, lg_t | 69 | 1 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
lowerCamelCase__ = """Create a default config file for Accelerate with only a few flags set."""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_="no" , SCREAMING_SNAKE_CASE_ = default_json_config_file , SCREAMING_SNAKE_CASE_ = False ) -> str:
lowerCAmelCase__ : Optional[Any] = Path(SCREAMING_SNAKE_CASE_ )
path.parent.mkdir(parents=SCREAMING_SNAKE_CASE_ , exist_ok=SCREAMING_SNAKE_CASE_ )
if path.exists():
print(
F'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
lowerCAmelCase__ : Tuple = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
F'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
lowerCAmelCase__ : Any = {
'compute_environment': 'LOCAL_MACHINE',
'mixed_precision': mixed_precision,
}
if torch.cuda.is_available():
lowerCAmelCase__ : Union[str, Any] = torch.cuda.device_count()
lowerCAmelCase__ : int = num_gpus
lowerCAmelCase__ : Any = False
if num_gpus > 1:
lowerCAmelCase__ : Dict = 'MULTI_GPU'
else:
lowerCAmelCase__ : Any = 'NO'
elif is_xpu_available() and use_xpu:
lowerCAmelCase__ : Tuple = torch.xpu.device_count()
lowerCAmelCase__ : int = num_xpus
lowerCAmelCase__ : Any = False
if num_xpus > 1:
lowerCAmelCase__ : Dict = 'MULTI_XPU'
else:
lowerCAmelCase__ : Dict = 'NO'
elif is_npu_available():
lowerCAmelCase__ : int = torch.npu.device_count()
lowerCAmelCase__ : Optional[int] = num_npus
lowerCAmelCase__ : List[str] = False
if num_npus > 1:
lowerCAmelCase__ : Union[str, Any] = 'MULTI_NPU'
else:
lowerCAmelCase__ : int = 'NO'
else:
lowerCAmelCase__ : str = 0
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Optional[int] = 1
lowerCAmelCase__ : List[str] = 'NO'
lowerCAmelCase__ : List[str] = ClusterConfig(**SCREAMING_SNAKE_CASE_ )
config.to_json_file(SCREAMING_SNAKE_CASE_ )
return path
def default_command_parser(parser, parents):
    """Register the `default` sub-command on *parser*.

    Args:
        parser: the ``add_subparsers`` action to attach the sub-command to.
        parents: parent parsers whose arguments the sub-command inherits.

    Returns:
        The configured sub-command parser.
    """
    # NOTE(review): the original code discarded the sub-parser returned by
    # `add_parser` and kept adding arguments to the parent — arguments must go
    # on the sub-parser for `accelerate config default` to work.
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    # Dispatch to the command implementation defined below in this module.
    parser.set_defaults(func=default_config_command)
    return parser
def default_config_command(args):
    """Entry point for `accelerate config default`: write a basic config file.

    Args:
        args: parsed CLI namespace with `mixed_precision` and `save_location`.
    """
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    # write_basic_config returns False when a config already existed.
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 69 | 1 |
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param(torch_layer, weight, bias=None):
    """Copy *weight* (and optionally *bias*) into *torch_layer*'s parameters.

    Shapes must already agree; this is a straight 1-to-1 copy used when porting
    trax checkpoint tensors into the PyTorch Reformer model.
    """
    assert torch_layer.weight.shape == weight.shape, f"{torch_layer} layer.weight does not match"
    torch_layer.weight = nn.Parameter(weight)
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f"{torch_layer} layer.bias does not match"
        torch_layer.bias = nn.Parameter(bias)
def set_layer_weights_in_torch_lsh(weights, torch_layer, hidden_size):
    """Load trax LSH self-attention weights (query_key, value, output dense) into *torch_layer*."""
    # set torch weights for 1-to-1 comparison
    np_query_key = np.asarray(weights[0])
    np_value = np.asarray(weights[1])
    np_dense = np.asarray(weights[2])
    set_param(
        torch_layer.self_attention.query_key,
        torch.tensor(np_query_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_layer_weights_in_torch_local(weights, torch_layer, hidden_size):
    """Load trax local self-attention weights (query, key, value, output dense) into *torch_layer*."""
    # set torch weights for 1-to-1 comparison
    np_query = np.asarray(weights[0])
    np_key = np.asarray(weights[1])
    np_value = np.asarray(weights[2])
    np_dense = np.asarray(weights[3])
    set_param(
        torch_layer.self_attention.query,
        torch.tensor(np_query).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.key,
        torch.tensor(np_key).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.self_attention.value,
        torch.tensor(np_value).transpose(1, 2).contiguous().view(-1, hidden_size),
    )
    set_param(
        torch_layer.output.dense,
        torch.tensor(np_dense).view(-1, hidden_size).contiguous().transpose(0, 1),
    )
def set_block_weights_in_torch(weights, torch_block, hidden_size):
    """Load one trax Reformer block (attention + feed-forward) into *torch_block*."""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0])
    layer_norm_1_bias = np.asarray(layer_norm_1[1])
    set_param(
        torch_block.attention.layer_norm,
        torch.tensor(layer_norm_1_weight),
        torch.tensor(layer_norm_1_bias),
    )
    # lsh weights + output
    attn_weights = weights[0][1]
    # LSH attention stores 3 tensors, local attention 4 (see the two loaders above).
    if len(attn_weights) < 4:
        set_layer_weights_in_torch_lsh(attn_weights, torch_block.attention, hidden_size)
    else:
        set_layer_weights_in_torch_local(attn_weights, torch_block.attention, hidden_size)
    # intermediate weighs
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0])
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1])
    set_param(
        torch_block.feed_forward.layer_norm,
        torch.tensor(layer_norm_2_weight),
        torch.tensor(layer_norm_2_bias),
    )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0])
    inter_dense_bias = np.asarray(intermediate_weights[1][1])
    set_param(
        torch_block.feed_forward.dense.dense,
        torch.tensor(inter_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(inter_dense_bias),
    )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0])
    out_dense_bias = np.asarray(intermediate_weights[4][1])
    set_param(
        torch_block.feed_forward.output.dense,
        torch.tensor(out_dense_weight).transpose(0, 1).contiguous(),
        torch.tensor(out_dense_bias),
    )
def set_model_weights_in_torch(weights, torch_model, hidden_size):
    """Load a whole trax Reformer checkpoint (`weights` pytree) into *torch_model*."""
    # reformer model
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1])
    set_param(
        torch_model_reformer.embeddings.word_embeddings,
        torch.tensor(word_embeddings),
    )
    # Axial position embeddings come as a tuple of factorized weights.
    if isinstance(weights[3], tuple):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights)):
            emb_weights = np.asarray(weights[3][emb_idx][0])
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f"{position_embeddings[emb_idx]} emb does not match"
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights))
    trax_layer_weights = weights[5]
    # Each torch block consumes 4 consecutive trax weight entries.
    assert len(torch_model_reformer.encoder.layers) * 4 == len(
        trax_layer_weights
    ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights, layer, hidden_size)
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0])
    layer_norm_out_bias = np.asarray(weights[7][1])
    set_param(
        torch_model_reformer.encoder.layer_norm,
        torch.tensor(layer_norm_out_weight),
        torch.tensor(layer_norm_out_bias),
    )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0])
    output_embed_bias = np.asarray(weights[9][1])
    set_param(
        torch_model.lm_head.decoder,
        torch.tensor(output_embed_weights).transpose(0, 1).contiguous(),
        torch.tensor(output_embed_bias),
    )
def convert_trax_checkpoint_to_pytorch(trax_model_pkl_path, config_file, pytorch_dump_path):
    """Convert a pickled trax Reformer checkpoint into a PyTorch state dict on disk.

    Args:
        trax_model_pkl_path: path to the trax checkpoint pickle.
        config_file: JSON file with the ReformerConfig describing the architecture.
        pytorch_dump_path: output path for `torch.save(model.state_dict())`.
    """
    # Initialise PyTorch model
    config = ReformerConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = ReformerModelWithLMHead(config)

    with open(trax_model_pkl_path, "rb") as f:
        model_weights = pickle.load(f)["weights"]

    set_model_weights_in_torch(model_weights, model, config.hidden_size)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    # CLI wrapper: parse the three required paths and run the conversion.
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--trax_model_pkl_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help=(
            "The config json file corresponding to the pre-trained Reformer model. \n"
            "This specifies the model architecture."
        ),
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path)
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
    from .tokenization_rembert import RemBertTokenizer
else:
    # sentencepiece is optional: without it there is no slow tokenizer to fall back to.
    RemBertTokenizer = None

logger = logging.get_logger(__name__)

# File names and download locations used by the fast RemBERT tokenizer class below.
VOCAB_FILES_NAMES = {"vocab_file": "sentencepiece.model", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/sentencepiece.model",
    },
    "tokenizer_file": {
        "google/rembert": "https://huggingface.co/google/rembert/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "google/rembert": 256,
}

SPIECE_UNDERLINE = "▁"
class A__(PreTrainedTokenizerFast):
    """Fast RemBERT tokenizer backed by the HuggingFace *tokenizers* library.

    Wraps a SentencePiece-derived tokenizer.json; falls back to the slow
    `RemBertTokenizer` when only the sentencepiece model file is available.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = RemBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behaves like a normal word: include the preceding space.
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )

        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        # Saving a slow tokenizer requires the original sentencepiece file.
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Build model inputs: `[CLS] A [SEP]` or `[CLS] A [SEP] B [SEP]`."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        """Return a mask with 1 at special-token positions and 0 elsewhere."""
        if already_has_special_tokens:
            if token_ids_1 is not None:
                raise ValueError(
                    'You should not supply a second sequence if the provided sequence of '
                    'ids is already formatted with special tokens for the model.' )
            return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_0]

        if token_ids_1 is not None:
            return [1] + ([0] * len(token_ids_0)) + [1] + ([0] * len(token_ids_1)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        """Return token type ids: 0 for the first segment (incl. specials), 1 for the second."""
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None):
        """Copy the sentencepiece vocab file into *save_directory*."""
        if not os.path.isdir(save_directory):
            logger.error('Vocabulary path ({}) should be a directory'.format(save_directory))
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file']
        )
        # Avoid copying a file onto itself.
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)

        return (out_vocab_file,)
def reverse_long_words(sentence: str) -> str:
    """Reverse every word longer than 4 characters; shorter words are kept as-is.

    >>> reverse_long_words("Hey wollef sroirraw")
    'Hey fellow warriors'
    """
    return " ".join(
        "".join(word[::-1]) if len(word) > 4 else word for word in sentence.split()
    )
# Script entry point: run this module's doctests, then demo the word reverser.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(reverse_long_words("""Hey wollef sroirraw"""))
from typing import Callable, Optional
from .. import Features
from ..packaged_modules.generator.generator import Generator
from .abc import AbstractDatasetInputStream
class A__(AbstractDatasetInputStream):
    """Dataset input stream that builds a dataset from a Python generator function."""

    def __init__(
        self,
        generator: Callable,
        features: Optional[Features] = None,
        cache_dir: str = None,
        keep_in_memory: bool = False,
        streaming: bool = False,
        gen_kwargs: Optional[dict] = None,
        num_proc: Optional[int] = None,
        **kwargs,
    ):
        super().__init__(
            features=features,
            cache_dir=cache_dir,
            keep_in_memory=keep_in_memory,
            streaming=streaming,
            num_proc=num_proc,
            **kwargs,
        )
        # Packaged `generator` builder does the actual example production.
        self.builder = Generator(
            cache_dir=cache_dir,
            features=features,
            generator=generator,
            gen_kwargs=gen_kwargs,
            **kwargs,
        )

    def read(self):
        """Build and return the dataset (streaming iterable or materialized map-style)."""
        if self.streaming:
            dataset = self.builder.as_streaming_dataset(split="train")
        # Build regular (map-style) dataset
        else:
            download_config = None
            download_mode = None
            verification_mode = None
            base_path = None

            self.builder.download_and_prepare(
                download_config=download_config,
                download_mode=download_mode,
                verification_mode=verification_mode,
                base_path=base_path,
                num_proc=self.num_proc,
            )
            dataset = self.builder.as_dataset(
                split="train", verification_mode=verification_mode, in_memory=self.keep_in_memory
            )
        return dataset
import unittest
import numpy as np
import torch
from torch import nn
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import KandinskyVaaPriorPipeline, PriorTransformer, UnCLIPScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import enable_full_determinism, skip_mps
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class A__(PipelineTesterMixin, unittest.TestCase):
    """Fast tests for the Kandinsky V2.2 prior pipeline using tiny dummy models."""

    pipeline_class = KandinskyVaaPriorPipeline
    params = ["prompt"]
    batch_params = ["prompt", "negative_prompt"]
    required_optional_params = [
        "num_images_per_prompt",
        "generator",
        "num_inference_steps",
        "latents",
        "negative_prompt",
        "guidance_scale",
        "output_type",
        "return_dict",
    ]
    test_xformers_attention = False

    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def block_out_channels_0(self):
        return self.time_input_dim

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def cross_attention_dim(self):
        return 100

    @property
    def dummy_tokenizer(self):
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        return tokenizer

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0 )
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=self.text_embedder_hidden_size,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1_000,
        )
        return CLIPTextModelWithProjection(config )

    @property
    def dummy_prior(self):
        torch.manual_seed(0 )
        model_kwargs = {
            'num_attention_heads': 2,
            'attention_head_dim': 12,
            'embedding_dim': self.text_embedder_hidden_size,
            'num_layers': 1,
        }
        model = PriorTransformer(**model_kwargs )
        # clip_std and clip_mean is initialized to be 0 so PriorTransformer.post_process_latents will always return 0 - set clip_std to be 1 so it won't return 0
        model.clip_std = nn.Parameter(torch.ones(model.clip_std.shape ) )
        return model

    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0 )
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=224,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=14,
        )
        model = CLIPVisionModelWithProjection(config )
        return model

    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor

    def get_dummy_components(self):
        """Assemble all dummy sub-models into the pipeline's component dict."""
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        text_encoder = self.dummy_text_encoder
        tokenizer = self.dummy_tokenizer
        image_processor = self.dummy_image_processor

        scheduler = UnCLIPScheduler(
            variance_type='fixed_small_log',
            prediction_type='sample',
            num_train_timesteps=1_000,
            clip_sample=True,
            clip_sample_range=10.0,
        )

        components = {
            'prior': prior,
            'image_encoder': image_encoder,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
            'scheduler': scheduler,
            'image_processor': image_processor,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        # MPS does not support device-bound generators.
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'horse',
            'generator': generator,
            'guidance_scale': 4.0,
            'num_inference_steps': 2,
            'output_type': 'np',
        }
        return inputs

    def test_kandinsky_prior(self):
        """Full fast-pipeline run: check output shape and a reference slice."""
        device = 'cpu'
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components )
        pipe = pipe.to(device )
        pipe.set_progress_bar_config(disable=None )

        output = pipe(**self.get_dummy_inputs(device ) )
        image = output.image_embeds
        image_from_tuple = pipe(
            **self.get_dummy_inputs(device ), return_dict=False, )[0]
        image_slice = image[0, -10:]
        image_from_tuple_slice = image_from_tuple[0, -10:]
        assert image.shape == (1, 32)
        expected_slice = np.array(
            [-0.0532, 1.7120, 0.3656, -1.0852, -0.8946, -1.1756, 0.4348, 0.2482, 0.5146, -0.1156] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2

    @skip_mps
    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == 'cpu'
        relax_max_difference = True
        test_mean_pixel_difference = False

        self._test_inference_batch_single_identical(
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )

    @skip_mps
    def test_attention_slicing_forward_pass(self):
        test_max_difference = torch_device == 'cpu'
        test_mean_pixel_difference = False
        self._test_attention_slicing_forward_pass(
            test_max_difference=test_max_difference,
            test_mean_pixel_difference=test_mean_pixel_difference,
        )
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
# Module logger; the feature extractor's __call__ warns through it.
logger = logging.get_logger(__name__)
class A__(SequenceFeatureExtractor):
    """TVLT audio feature extractor: log-mel spectrogram patches plus an attention mask."""

    model_input_names = ['audio_values', 'audio_mask']

    def __init__(
        self,
        spectrogram_length=2_048,
        num_channels=1,
        patch_size=[16, 16],
        feature_size=128,
        sampling_rate=44_100,
        hop_length_to_sampling_rate=86,
        n_fft=2_048,
        padding_value=0.0,
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            **kwargs,
        )
        self.spectrogram_length = spectrogram_length
        self.num_channels = num_channels
        self.patch_size = patch_size
        # Number of patches along the frequency axis.
        self.freq_len = feature_size // self.patch_size[1]
        self.n_fft = n_fft
        self.hop_length = sampling_rate // hop_length_to_sampling_rate
        self.sampling_rate = sampling_rate
        self.padding_value = padding_value
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2,
            num_mel_filters=feature_size,
            min_frequency=0.0,
            max_frequency=22050.0,
            sampling_rate=sampling_rate,
            norm='slaney',
            mel_scale='slaney',
        ).T

    def _np_extract_fbank_features(self, waveform: np.array):
        """Compute a normalized log-mel spectrogram, values clipped into [-1, 1]."""
        log_spec = spectrogram(
            waveform,
            window_function(self.n_fft, 'hann' ),
            frame_length=self.n_fft,
            hop_length=self.hop_length,
            power=2.0,
            mel_filters=self.mel_filters.T,
            log_mel='dB',
            db_range=80.0,
        )
        log_spec = log_spec[:, :-1]
        log_spec = log_spec - 20.0
        log_spec = np.clip(log_spec / 40.0, -2.0, 0.0 ) + 1.0
        return log_spec

    def __call__(
        self,
        raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
        return_tensors: Optional[Union[str, TensorType]] = None,
        return_attention_mask: Optional[bool] = True,
        sampling_rate: Optional[int] = None,
        resample: bool = False,
        mask_audio: bool = False,
        **kwargs,
    ):
        """Featurize raw audio into padded log-mel patches and (optionally) an attention mask.

        NOTE(review): `resample`/`mask_audio` are accepted but not used in this
        body — presumably consumed downstream; confirm against callers.
        """
        if sampling_rate is not None:
            if sampling_rate != self.sampling_rate:
                raise ValueError(
                    'This feature extractor is set to support sampling rate'
                    f''' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'''
                    f''' with {self.sampling_rate} and not {sampling_rate}.''' )
        else:
            logger.warning(
                'It is strongly recommended to pass the `sampling_rate` argument to this function. '
                'Failing to do so can result in silent errors that might be hard to debug.' )

        is_batched_numpy = isinstance(raw_speech, np.ndarray ) and len(raw_speech.shape ) > 1
        if is_batched_numpy and len(raw_speech.shape ) > 2:
            raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
        is_batched = is_batched_numpy or (
            isinstance(raw_speech, (list, tuple) ) and (isinstance(raw_speech[0], (np.ndarray, tuple, list) ))
        )
        if is_batched:
            raw_speech = [np.asarray([speech], dtype=np.float32 ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech, np.ndarray ):
            raw_speech = np.asarray(raw_speech, dtype=np.float32 )
        elif isinstance(raw_speech, np.ndarray ) and raw_speech.dtype is np.dtype(np.float64 ):
            raw_speech = raw_speech.astype(np.float32 )
        # always return batch
        if not is_batched:
            raw_speech = [np.asarray([raw_speech] ).T]

        # Convert audio signals to log mel spectrograms, truncate by time axis
        audio_features = [
            self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
        ]
        if isinstance(audio_features[0], list ):
            audio_features = [np.asarray(feature, dtype=np.float32 ) for feature in audio_features]

        # Create audio attention mask
        max_patch_len = max(
            [ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] )  # The maximum number of audio patches in a batch
        if return_attention_mask:
            audio_mask = [
                (ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
                + (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
                for feature in audio_features
            ]
            audio_mask = np.array(audio_mask ).astype(np.float32 )

        # convert into correct format for padding
        max_time_len = max_patch_len // self.freq_len * self.patch_size[0]  # The maximum audio size in a batch
        padded_audio_features = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.float32 )
        padded_audio_features = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
            feature = audio_features[i]
            padded_audio_features[i, :, : feature.shape[0], :] = feature

        # return as BatchFeature
        if return_attention_mask:
            data = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
        else:
            data = {'audio_values': padded_audio_features}

        encoded_inputs = BatchFeature(data=data, tensor_type=return_tensors )
        return encoded_inputs
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
# Lazy-import structure for the mbart sub-package: maps submodule -> public names.
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}

try:
    if not is_sentencepiece_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]

try:
    if not is_flax_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig

    try:
        if not is_sentencepiece_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart import MBartTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_mbart_fast import MBartTokenizerFast

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_mbart import (
            MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
            MBartForCausalLM,
            MBartForConditionalGeneration,
            MBartForQuestionAnswering,
            MBartForSequenceClassification,
            MBartModel,
            MBartPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel

    try:
        if not is_flax_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_flax_mbart import (
            FlaxMBartForConditionalGeneration,
            FlaxMBartForQuestionAnswering,
            FlaxMBartForSequenceClassification,
            FlaxMBartModel,
            FlaxMBartPreTrainedModel,
        )

else:
    import sys

    # Replace this module with a lazy loader so heavy backends import on demand.
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
import unittest
from transformers import DonutProcessor
lowerCamelCase__ = """naver-clova-ix/donut-base"""
class A__(unittest.TestCase):
    """Tests for DonutProcessor's token-sequence-to-JSON decoding."""

    def setUp(self):
        # lowerCamelCase__ is the module-level checkpoint name constant above.
        self.processor = DonutProcessor.from_pretrained(lowerCamelCase__)

    def test_token2json(self):
        """Round-trip: a tagged token sequence decodes to the expected dict."""
        expected_json = {
            'name': 'John Doe',
            'age': '99',
            'city': 'Atlanta',
            'state': 'GA',
            'zip': '30301',
            'phone': '123-4567',
            'nicknames': [{'nickname': 'Johnny'}, {'nickname': 'JD'}],
        }
        sequence = (
            '<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>'
            '<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>'
            '<s_nicknames><s_nickname>Johnny</s_nickname>'
            '<sep/><s_nickname>JD</s_nickname></s_nicknames>'
        )
        actual_json = self.processor.token2json(sequence)
        self.assertDictEqual(actual_json, expected_json)
from __future__ import annotations
# Module metadata (the obfuscation had collapsed all six dunders into one name).
__author__ = "Muhammad Umer Farooq"
__license__ = "MIT"
__version__ = "1.0.0"
__maintainer__ = "Muhammad Umer Farooq"
__email__ = "contact@muhammadumerfarooq.me"
__status__ = "Alpha"
import re
from html.parser import HTMLParser
from urllib import parse
import requests
class Parser(HTMLParser):
    """HTML parser that collects absolute URLs from anchor tags on one domain.

    Referenced by name (`Parser`) in `emails_from_url` below.
    """

    def __init__(self, domain: str):
        super().__init__()
        self.urls: list[str] = []
        self.domain = domain

    def handle_starttag(self, tag: str, attrs: list[tuple[str, str | None]]) -> None:
        """Collect the href of every non-empty, non-fragment <a> tag."""
        # Only parse the 'anchor' tag.
        if tag == "a":
            # Check the list of defined attributes.
            for name, value in attrs:
                # If href is defined, and not empty nor # print it.
                if name == "href" and value != "#" and value != "":
                    # If not already in urls.
                    if value not in self.urls:
                        url = parse.urljoin(self.domain, value)
                        self.urls.append(url)
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Return the registered ("main") domain of a URL, e.g. ``github.com``.

    NOTE(review): the original called ``get_sub_domain_name``, a name lost to
    the automated rename (the helper below is bound to a different name); the
    netloc extraction is inlined here so the function is self-contained.
    """
    return ".".join(parse.urlparse(SCREAMING_SNAKE_CASE_ ).netloc.split('.' )[-2:] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Return the network-location (host[:port]) component of a URL."""
    parsed = parse.urlparse(SCREAMING_SNAKE_CASE_ )
    return parsed.netloc
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "https://github.com" ) -> list[str]:
    """Crawl *url* one level deep and return the sorted e-mail addresses found.

    NOTE(review): the original bound every intermediate value to a throwaway
    name and then read it back under its pre-obfuscation name (`parser`, `r`,
    `valid_emails`, `read`, `emails`), raising NameError; locals restored.
    The undefined `Parser`/`get_domain_name` helpers are replaced by the
    sibling class `A__` above and an inlined domain computation.
    """
    domain = ".".join(parse.urlparse(SCREAMING_SNAKE_CASE_ ).netloc.split('.' )[-2:] )
    # Initialize the parser
    parser = A__(SCREAMING_SNAKE_CASE_ )
    try:
        # Open URL
        r = requests.get(SCREAMING_SNAKE_CASE_ )
        # pass the raw HTML to the parser to get links
        parser.feed(r.text )
        # Get links and loop through
        valid_emails = set()
        for link in parser.urls:
            # open URL.
            try:
                read = requests.get(link )
                # Get the valid email.
                emails = re.findall('[a-zA-Z0-9]+@' + domain , read.text )
                # If not in list then append it.
                for email in emails:
                    valid_emails.add(email )
            except ValueError:
                pass
    except ValueError:
        raise SystemExit(1 )
    # Finally return a sorted list of email addresses with no duplicates.
    return sorted(valid_emails )
if __name__ == "__main__":
    # NOTE(review): the crawler above was renamed to `lowerCAmelCase__` but this
    # call site still said `emails_from_url`, and the result was bound to a
    # throwaway name while `emails` was printed (NameError); both restored.
    emails = lowerCAmelCase__("""https://github.com""")
    print(F"""{len(emails)} emails found:""")
    print("""\n""".join(sorted(emails)))
from numpy import exp, pi, sqrt
def lowerCAmelCase__ ( x : float , mu : float = 0.0 , sigma : float = 1.0 ) -> float:
    """Gaussian probability density at *x* for mean *mu* and std-dev *sigma*.

    NOTE(review): all three parameters shared one identifier (a SyntaxError)
    while the body read `x`/`mu`/`sigma`; parameter names restored. The
    return annotation said ``int`` but the expression is a float; corrected.
    """
    return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
# Run the module's doctest examples when executed directly (the visible code
# defines none, so this is a no-op today).
if __name__ == "__main__":
    import doctest
    doctest.testmod()
# Lint as: python3
import dataclasses
import re
from dataclasses import dataclass
from functools import total_ordering
from typing import Optional, Union
lowerCamelCase__ = re.compile(r"""^(?P<major>\d+)""" r"""\.(?P<minor>\d+)""" r"""\.(?P<patch>\d+)$""")
@total_ordering
@dataclass
class A__ :
    """Dataclass describing a semantic version string "x.y.z".

    NOTE(review): an automated rename damaged this class — all five field
    declarations are `lowercase` (only the last binding survives; the
    original field names, presumably version_str/major/minor/patch, are
    lost), `__post_init__` and the `tuple` property were renamed to
    `_lowerCamelCase`, and several locals are read back under undefined
    names (`other`, `dic`, `field_names`). Restore against upstream before
    relying on this class.
    """
    lowercase = 42
    lowercase = None
    lowercase = None
    lowercase = None
    lowercase = None
    def _lowerCamelCase ( self : Union[str, Any] ):
        '''simple docstring'''
        # Presumably __post_init__: parse self.version_str into
        # major/minor/patch (the parsed values are discarded into a local here).
        lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : int = _str_to_version_tuple(self.version_str )
    def __repr__( self : Tuple ):
        '''simple docstring'''
        # Render as "major.minor.patch" from the 3-tuple `self.tuple`.
        return f'''{self.tuple[0]}.{self.tuple[1]}.{self.tuple[2]}'''
    @property
    def _lowerCamelCase ( self : List[Any] ):
        '''simple docstring'''
        # Presumably the `tuple` property read by __repr__/__eq__/__lt__.
        return self.major, self.minor, self.patch
    def _lowerCamelCase ( self : int , a : str ):
        '''simple docstring'''
        # Presumably `_validate_operand`: coerce the operand to a comparable
        # Version (strings are parsed; Versions pass through).
        if isinstance(a , a ):
            return Version(a )
        elif isinstance(a , a ):
            return other
        raise TypeError(f'''{other} (type {type(a )}) cannot be compared to version.''' )
    def __eq__( self : Optional[Any] , a : Tuple ):
        '''simple docstring'''
        # Equal when (major, minor, patch) tuples match; incomparable
        # operands compare unequal instead of raising.
        try:
            lowerCAmelCase__ : Optional[Any] = self._validate_operand(a )
        except (TypeError, ValueError):
            return False
        else:
            return self.tuple == other.tuple
    def __lt__( self : Optional[int] , a : Dict ):
        '''simple docstring'''
        # total_ordering derives <=, >, >= from __eq__ and this __lt__.
        lowerCAmelCase__ : str = self._validate_operand(a )
        return self.tuple < other.tuple
    def __hash__( self : List[Any] ):
        '''simple docstring'''
        # Hash the canonical string form so equal versions hash equally.
        return hash(_version_tuple_to_str(self.tuple ) )
    @classmethod
    def _lowerCamelCase ( cls : Optional[int] , a : Dict ):
        '''simple docstring'''
        # Alternate constructor: build from a dict, dropping unknown keys.
        lowerCAmelCase__ : Optional[Any] = {f.name for f in dataclasses.fields(cls )}
        return cls(**{k: v for k, v in dic.items() if k in field_names} )
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        # Plain string form of the version.
        return self.version_str
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> tuple:
    """Parse a version string "x.y.z" into a tuple of three ints.

    Raises ValueError when the string is not of the x.y.z digit form.

    NOTE(review): the original read its match/argument back under undefined
    names (`res`/`version_str`), referenced the module regex by its
    pre-obfuscation name `_VERSION_REG`, and converted the parameter instead
    of each captured group; restored, with the regex inlined. The
    `Optional[int]` return annotation was wrong; corrected.
    """
    res = re.match(r'^(?P<major>\d+)\.(?P<minor>\d+)\.(?P<patch>\d+)$' , SCREAMING_SNAKE_CASE_ )
    if not res:
        raise ValueError(f'''Invalid version \'{SCREAMING_SNAKE_CASE_}\'. Format should be x.y.z with {{x,y,z}} being digits.''' )
    return tuple(int(v ) for v in [res.group('major' ), res.group('minor' ), res.group('patch' )] )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Join a (major, minor, patch) tuple back into the "x.y.z" string.

    NOTE(review): the original stringified the whole tuple for every element
    (``str(<tuple>) for v in ...``) and iterated the undefined name
    ``version_tuple``; fixed to stringify each component of the parameter.
    Return annotation corrected from ``Optional[int]`` to ``str``.
    """
    return ".".join(str(v ) for v in SCREAMING_SNAKE_CASE_ )
import json
import os
import unittest
from transformers.models.xlm.tokenization_xlm import VOCAB_FILES_NAMES, XLMTokenizer
from transformers.testing_utils import slow
from ...test_tokenization_common import TokenizerTesterMixin
class A__ ( __magic_name__ , unittest.TestCase ):
    """Tokenizer tests for XLM (BPE over a toy vocab plus a slow round-trip).

    NOTE(review): an automated rename damaged this class — the mixin base is
    the undefined `__magic_name__` (presumably TokenizerTesterMixin), the two
    class attributes are both `lowercase` (originals lost), method names lost
    their `setUp`/`test_*` forms, and most locals are discarded into
    throwaway names and then read back under undefined ones (`a`,
    `input_text`, `tokenizer`, `tokens`, `text`, ...). Restore against
    upstream before relying on these tests.
    """
    lowercase = XLMTokenizer
    lowercase = False
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        # Presumably setUp: write a toy BPE vocab + merges to tmpdirname.
        super().setUp()
        # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        lowerCAmelCase__ : List[str] = [
            'l',
            'o',
            'w',
            'e',
            'r',
            's',
            't',
            'i',
            'd',
            'n',
            'w</w>',
            'r</w>',
            't</w>',
            'lo',
            'low',
            'er</w>',
            'low</w>',
            'lowest</w>',
            'newer</w>',
            'wider</w>',
            '<unk>',
        ]
        # NOTE(review): the vocab dict / merges list / file paths below are all
        # bound to throwaway locals, so the writes that read `a` and
        # `self.vocab_file`/`self.merges_file` fail as written.
        lowerCAmelCase__ : Any = dict(zip(a , range(len(a ) ) ) )
        lowerCAmelCase__ : Optional[int] = ['l o 123', 'lo w 1456', 'e r</w> 1789', '']
        lowerCAmelCase__ : Any = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['vocab_file'] )
        lowerCAmelCase__ : List[str] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES['merges_file'] )
        with open(self.vocab_file , 'w' ) as fp:
            fp.write(json.dumps(a ) )
        with open(self.merges_file , 'w' ) as fp:
            fp.write('\n'.join(a ) )
    def _lowerCamelCase ( self : List[str] , a : Dict ):
        '''simple docstring'''
        # Presumably get_input_output_texts: identical input/output pair.
        lowerCAmelCase__ : List[Any] = 'lower newer'
        lowerCAmelCase__ : Any = 'lower newer'
        return input_text, output_text
    def _lowerCamelCase ( self : int ):
        '''simple docstring'''
        # Presumably the BPE tokenization test: "lower" -> ["low", "er</w>"],
        # then ids for tokens + "<unk>".
        lowerCAmelCase__ : Tuple = XLMTokenizer(self.vocab_file , self.merges_file )
        lowerCAmelCase__ : Optional[int] = 'lower'
        lowerCAmelCase__ : Optional[Any] = ['low', 'er</w>']
        lowerCAmelCase__ : Dict = tokenizer.tokenize(a )
        self.assertListEqual(a , a )
        lowerCAmelCase__ : Tuple = tokens + ['<unk>']
        lowerCAmelCase__ : Optional[int] = [14, 15, 20]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(a ) , a )
    @slow
    def _lowerCamelCase ( self : Any ):
        '''simple docstring'''
        # Presumably test_sequence_builders: special tokens [0]...[1] wrap the
        # encoded single sequence and pair.
        lowerCAmelCase__ : List[Any] = XLMTokenizer.from_pretrained('xlm-mlm-en-2048' )
        lowerCAmelCase__ : Any = tokenizer.encode('sequence builders' , add_special_tokens=a )
        lowerCAmelCase__ : Union[str, Any] = tokenizer.encode('multi-sequence build' , add_special_tokens=a )
        lowerCAmelCase__ : List[Any] = tokenizer.build_inputs_with_special_tokens(a )
        lowerCAmelCase__ : List[str] = tokenizer.build_inputs_with_special_tokens(a , a )
        assert encoded_sentence == [0] + text + [1]
        assert encoded_pair == [0] + text + [1] + text_a + [1]
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> tuple:
    """Describe a ``map[k]`` read as an ``(operator.getitem, key)`` pair.

    NOTE(review): the body returned the undefined name ``k`` (the parameter
    was renamed); fixed. Return annotation corrected from ``int`` to a tuple.
    """
    return getitem, SCREAMING_SNAKE_CASE_
def lowerCAmelCase__ ( k , v ) -> tuple:
    """Describe a ``map[k] = v`` write as an ``(operator.setitem, k, v)`` triple.

    NOTE(review): both parameters were renamed to one identifier (a
    SyntaxError) and the body read the lost names; parameter names restored.
    Return annotation corrected from ``int`` to a tuple.
    """
    return setitem, k, v
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> tuple:
    """Describe a ``del map[k]`` as an ``(operator.delitem, key)`` pair.

    NOTE(review): the body returned the undefined name ``k``; fixed. Return
    annotation corrected from ``List[str]`` to a tuple.
    """
    return delitem, SCREAMING_SNAKE_CASE_
def lowerCAmelCase__ ( obj , fun , *args ) -> tuple:
    """Run ``fun(obj, *args)``; return (result, None) or (None, exception).

    The broad ``except Exception`` is deliberate: the harness compares the
    exception behaviour of HashMap against plain dict.

    NOTE(review): the three parameters were collapsed to one identifier (a
    SyntaxError) and the body called the lost name ``fun``; restored.
    """
    try:
        return fun(obj , *args ), None
    except Exception as e:
        return None, e
# Scripted operation sequences replayed against both HashMap and dict.
# NOTE(review): `_set`/`_get`/`_del` are the pre-obfuscation names of the
# helpers defined above (all renamed to `lowerCAmelCase__`), so these
# literals raise NameError as written; additionally every fixture is bound
# to the same module name `lowerCamelCase__`, so only the last assignment
# survives. Restore the original names before use.
lowerCamelCase__ = (
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
)
lowerCamelCase__ = [
    _set("""key_a""", """val_a"""),
    _set("""key_a""", """val_b"""),
]
lowerCamelCase__ = [
    _set("""key_a""", """val_a"""),
    _set("""key_b""", """val_b"""),
    _del("""key_a"""),
    _del("""key_b"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
]
lowerCamelCase__ = [
    _get("""key_a"""),
    _del("""key_a"""),
    _set("""key_a""", """val_a"""),
    _del("""key_a"""),
    _del("""key_a"""),
    _get("""key_a"""),
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
]
lowerCamelCase__ = [
    *[_set(x, x) for x in range(5)], # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set("""key_a""", """val_b"""),
]
# Replay each scripted operation list against a HashMap and a plain dict and
# assert they stay observably identical after every step.
# NOTE(review): the parametrize arguments (`_add_items`, ...) are the
# pre-obfuscation names of the fixtures above, and the per-step results are
# discarded into throwaway locals while the asserts read the lost names
# (`my_res`, `py_res`, `my`, `py`); this raises NameError as written —
# restore the bindings before use.
@pytest.mark.parametrize(
    'operations' , (
        pytest.param(_add_items , id='add items' ),
        pytest.param(_overwrite_items , id='overwrite items' ),
        pytest.param(_delete_items , id='delete items' ),
        pytest.param(_access_absent_items , id='access absent items' ),
        pytest.param(_add_with_resize_up , id='add with resize up' ),
        pytest.param(_add_with_resize_down , id='add with resize down' ),
    ) , )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
    lowerCAmelCase__ : List[str] = HashMap(initial_block_size=4 )
    lowerCAmelCase__ : Optional[int] = {}
    for _, (fun, *args) in enumerate(SCREAMING_SNAKE_CASE_ ):
        lowerCAmelCase__ , lowerCAmelCase__ : int = _run_operation(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ , lowerCAmelCase__ : str = _run_operation(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , *SCREAMING_SNAKE_CASE_ )
        assert my_res == py_res
        assert str(SCREAMING_SNAKE_CASE_ ) == str(SCREAMING_SNAKE_CASE_ )
        assert set(SCREAMING_SNAKE_CASE_ ) == set(SCREAMING_SNAKE_CASE_ )
        assert len(SCREAMING_SNAKE_CASE_ ) == len(SCREAMING_SNAKE_CASE_ )
        assert set(my.items() ) == set(py.items() )
def lowerCAmelCase__ ( ) -> None:
    """Assert HashMap exposes no public attribute that a plain dict does not.

    NOTE(review): the inner predicate read the undefined name ``name`` and
    both computed sets were discarded into throwaway locals before the final
    comparison read the lost names; restored. Return annotation corrected
    from ``Optional[int]`` (nothing is returned).
    """
    def is_public(name : str ) -> bool:
        return not name.startswith('_' )
    dict_public_names = {name for name in dir({} ) if is_public(name )}
    hash_public_names = {name for name in dir(HashMap() ) if is_public(name )}
    # dict's public surface must be a strict superset of HashMap's.
    assert dict_public_names > hash_public_names
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> torch.Tensor:
    """Resize a PIL image down to a multiple of 32 and scale it to a [-1, 1] NCHW tensor.

    NOTE(review): the width/height and each intermediate image were discarded
    into throwaway locals (the body then read the lost names ``w``/``h``),
    the dtype was mangled to the undefined ``np.floataa`` (restored to
    ``np.float32``), and the ``List[str]`` return annotation was wrong.
    """
    w, h = SCREAMING_SNAKE_CASE_.size
    w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
    image = SCREAMING_SNAKE_CASE_.resize((w, h) , resample=PIL_INTERPOLATION['lanczos'] )
    image = np.array(image ).astype(np.float32 ) / 255.0
    image = image[None].transpose(0 , 3 , 1 , 2 )
    image = torch.from_numpy(image )
    # Map [0, 1] -> [-1, 1] as expected by the diffusion model.
    return 2.0 * image - 1.0
class A__ ( __magic_name__ ):
    """Latent-diffusion super-resolution pipeline (VQ-VAE + UNet + scheduler).

    NOTE(review): an automated rename damaged this class — the base class is
    the undefined `__magic_name__` (presumably DiffusionPipeline), the
    constructor parameters share one identifier (a SyntaxError), and inside
    `__call__` every intermediate is bound to a throwaway local while later
    lines read the lost names (`batch_size`, `latents`, `image`, ...).
    Restore the bindings against upstream before use.
    """
    def __init__( self : List[str] , a : VQModel , a : UNetaDModel , a : Union[
        DDIMScheduler,
        PNDMScheduler,
        LMSDiscreteScheduler,
        EulerDiscreteScheduler,
        EulerAncestralDiscreteScheduler,
        DPMSolverMultistepScheduler,
    ] , ):
        '''simple docstring'''
        super().__init__()
        # Register submodules so they move with .to(device) and serialization.
        self.register_modules(vqvae=a , unet=a , scheduler=a )
    @torch.no_grad()
    def __call__( self : int , a : Union[torch.Tensor, PIL.Image.Image] = None , a : Optional[int] = 1 , a : Optional[int] = 100 , a : Optional[float] = 0.0 , a : Optional[Union[torch.Generator, List[torch.Generator]]] = None , a : Optional[str] = "pil" , a : bool = True , ):
        '''simple docstring'''
        # Determine the batch size from the input image type.
        if isinstance(a , PIL.Image.Image ):
            lowerCAmelCase__ : str = 1
        elif isinstance(a , torch.Tensor ):
            lowerCAmelCase__ : Union[str, Any] = image.shape[0]
        else:
            raise ValueError(f'''`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(a )}''' )
        # PIL input is converted to a [-1, 1] NCHW tensor by the module-level
        # preprocess helper.
        if isinstance(a , PIL.Image.Image ):
            lowerCAmelCase__ : List[Any] = preprocess(a )
        lowerCAmelCase__ , lowerCAmelCase__ : Union[str, Any] = image.shape[-2:]
        # in_channels should be 6: 3 for latents, 3 for low resolution image
        lowerCAmelCase__ : Optional[int] = (batch_size, self.unet.config.in_channels // 2, height, width)
        lowerCAmelCase__ : Optional[Any] = next(self.unet.parameters() ).dtype
        # Draw the initial noise latents on the pipeline device.
        lowerCAmelCase__ : List[str] = randn_tensor(a , generator=a , device=self.device , dtype=a )
        lowerCAmelCase__ : Any = image.to(device=self.device , dtype=a )
        # set timesteps and move to the correct device
        self.scheduler.set_timesteps(a , device=self.device )
        lowerCAmelCase__ : Optional[Any] = self.scheduler.timesteps
        # scale the initial noise by the standard deviation required by the scheduler
        lowerCAmelCase__ : Optional[Any] = latents * self.scheduler.init_noise_sigma
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        lowerCAmelCase__ : Union[str, Any] = 'eta' in set(inspect.signature(self.scheduler.step ).parameters.keys() )
        lowerCAmelCase__ : List[str] = {}
        if accepts_eta:
            lowerCAmelCase__ : List[Any] = eta
        for t in self.progress_bar(a ):
            # concat latents and low resolution image in the channel dimension.
            lowerCAmelCase__ : Union[str, Any] = torch.cat([latents, image] , dim=1 )
            lowerCAmelCase__ : Dict = self.scheduler.scale_model_input(a , a )
            # predict the noise residual
            lowerCAmelCase__ : Tuple = self.unet(a , a ).sample
            # compute the previous noisy sample x_t -> x_t-1
            lowerCAmelCase__ : List[str] = self.scheduler.step(a , a , a , **a ).prev_sample
        # decode the image latents with the VQVAE
        lowerCAmelCase__ : Dict = self.vqvae.decode(a ).sample
        lowerCAmelCase__ : Tuple = torch.clamp(a , -1.0 , 1.0 )
        # Map decoded output from [-1, 1] back to [0, 1] HWC numpy.
        lowerCAmelCase__ : Tuple = image / 2 + 0.5
        lowerCAmelCase__ : List[Any] = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
        if output_type == "pil":
            lowerCAmelCase__ : int = self.numpy_to_pil(a )
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=a )
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCamelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Return the checkpoint name referenced in *config_class*'s docstring, or None.

    A link counts only when it points at ``https://huggingface.co/<name>``
    for the same ``<name>`` shown as the link text.

    NOTE(review): locals were discarded into throwaway names and read back
    under the lost ones (`checkpoint`, `checkpoints`), and the module regex
    was referenced by its pre-obfuscation name `_re_checkpoint`; names
    restored and the regex inlined. The original ``-> List[str]`` annotation
    referenced an unimported name and was wrong (a single name or None is
    returned); removed.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(SCREAMING_SNAKE_CASE_ )
    checkpoints = re.findall(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)' , config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        if ckpt_link == f'''https://huggingface.co/{ckpt_name}''':
            checkpoint = ckpt_name
            break
    return checkpoint
def lowerCAmelCase__ ( ) -> int:
    """Raise ValueError listing config classes whose docstring lacks a valid checkpoint.

    NOTE(review): the accumulator list and the per-class locals are each
    discarded into throwaway names and read back under the lost ones
    (`configs_without_checkpoint`, `checkpoint`, `name`, `message`), so this
    raises NameError as written; `CONFIG_MAPPING`,
    `CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK` and
    `get_checkpoint_from_config_class` are likewise pre-obfuscation
    module-level names. Restore the bindings before use.
    """
    lowerCAmelCase__ : Union[str, Any] = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        lowerCAmelCase__ : Union[str, Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ : Optional[Any] = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        lowerCAmelCase__ : List[str] = '\n'.join(sorted(SCREAMING_SNAKE_CASE_ ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    # NOTE(review): the checker above was renamed to `lowerCAmelCase__` but this
    # call site still used its old name `check_config_docstrings_have_checkpoints`
    # (NameError). Call the current binding instead.
    lowerCAmelCase__()
import os
from collections import deque
import torch
from torch.utils.data import Dataset
class A__ ( __magic_name__ ):
def __init__( self : Union[str, Any] , a : str="" , a : str="train" ):
'''simple docstring'''
assert os.path.isdir(a )
lowerCAmelCase__ : Optional[Any] = []
lowerCAmelCase__ : Dict = os.listdir(a )
for story_filename in story_filenames_list:
if "summary" in story_filename:
continue
lowerCAmelCase__ : Union[str, Any] = os.path.join(a , a )
if not os.path.isfile(a ):
continue
self.documents.append(a )
def __len__( self : Any ):
'''simple docstring'''
return len(self.documents )
def __getitem__( self : Dict , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.documents[idx]
lowerCAmelCase__ : Union[str, Any] = document_path.split('/' )[-1]
with open(a , encoding='utf-8' ) as source:
lowerCAmelCase__ : List[Any] = source.read()
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = process_story(a )
return document_name, story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> tuple:
    """Split a raw CNN/DM story into (story_lines, summary_lines).

    Lines before the first "@highlight" are the article; subsequent
    non-"@highlight" lines are the summary. Every line gets a terminal
    period when it lacks end punctuation.

    NOTE(review): the original read its argument and every local back under
    undefined names (`raw_story`, `nonempty_lines`, `story_lines`, `lines`,
    `element`) and called the renamed sibling `_add_missing_period`; the
    locals are restored and the period fix-up inlined as a nested helper so
    the function is self-contained.
    """
    end_tokens = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']

    def _add_period(line ):
        # "@highlight" marker lines are kept verbatim.
        if line.startswith('@highlight' ):
            return line
        return line if line[-1] in end_tokens else line + "."

    nonempty_lines = list(filter(lambda x: len(x ) != 0 , [line.strip() for line in SCREAMING_SNAKE_CASE_.split('\n' )] ) )
    # for some unknown reason some lines miss a period, add it
    nonempty_lines = [_add_period(line ) for line in nonempty_lines]
    # gather article lines
    story_lines = []
    lines = deque(nonempty_lines )
    while True:
        try:
            element = lines.popleft()
            if element.startswith('@highlight' ):
                break
            story_lines.append(element )
        except IndexError:
            # if "@highlight" is absent from the file we pop
            # all elements until there is None, raising an exception.
            return story_lines, []
    # gather summary lines
    summary_lines = list(filter(lambda t: not t.startswith('@highlight' ) , lines ) )
    return story_lines, summary_lines
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Append a period to *line* unless it already ends in end punctuation.

    "@highlight" marker lines are returned unchanged.

    NOTE(review): the token list was discarded into a throwaway local and the
    body read the undefined names `END_TOKENS`/`line`; restored. Return
    annotation corrected from ``Any`` to ``str``.
    """
    end_tokens = ['.', '!', '?', '...', '\'', '`', '"', '\u2019', '\u2019', ')']
    if SCREAMING_SNAKE_CASE_.startswith('@highlight' ):
        return SCREAMING_SNAKE_CASE_
    if SCREAMING_SNAKE_CASE_[-1] in end_tokens:
        return SCREAMING_SNAKE_CASE_
    return SCREAMING_SNAKE_CASE_ + "."
def lowerCAmelCase__ ( sequence , block_size , pad_token_id ) -> list:
    """Truncate, or right-pad in place, *sequence* to exactly *block_size* tokens.

    NOTE(review): all three parameters shared one identifier (a SyntaxError)
    while the body read the lost names; parameter names restored. Return
    annotation corrected from ``str`` to a list of token ids.
    """
    if len(sequence ) > block_size:
        return sequence[:block_size]
    else:
        # Pads the caller's list in place, then returns it.
        sequence.extend([pad_token_id] * (block_size - len(sequence )) )
        return sequence
def lowerCAmelCase__ ( sequence , pad_token_id ) -> torch.Tensor:
    """Return a 1/0 attention mask: 0 wherever *sequence* equals *pad_token_id*.

    NOTE(review): both parameters shared one identifier (a SyntaxError) and
    the zeroing assignment targeted a throwaway local instead of the masked
    positions, so the mask was all ones; restored.
    """
    mask = torch.ones_like(sequence )
    idx_pad_tokens = sequence == pad_token_id
    mask[idx_pad_tokens] = 0
    return mask
def lowerCAmelCase__ ( story_lines , summary_lines , tokenizer ) -> tuple:
    """Tokenize story and summary lines and flatten each into a single id list.

    NOTE(review): the three parameters shared one identifier (a SyntaxError)
    and each intermediate list was discarded before the flattening step read
    it back; restored. The (story, summary, tokenizer) parameter order
    follows the upstream example script — confirm against callers.
    """
    story_lines_token_ids = [tokenizer.encode(line ) for line in story_lines]
    story_token_ids = [token for sentence in story_lines_token_ids for token in sentence]
    summary_lines_token_ids = [tokenizer.encode(line ) for line in summary_lines]
    summary_token_ids = [token for sentence in summary_lines_token_ids for token in sentence]
    return story_token_ids, summary_token_ids
def lowerCAmelCase__ ( batch , separator_token_id ) -> torch.Tensor:
    """Alternating 0/1 segment ids per token, flipping at each separator token.

    NOTE(review): both parameters shared one identifier (a SyntaxError) and
    every accumulator was discarded into a throwaway local before being read
    back; restored. Note tokens before the first separator get
    ``-1 % 2 == 1``.
    """
    batch_embeddings = []
    for sequence in batch:
        sentence_num = -1
        embeddings = []
        for s in sequence:
            if s == separator_token_id:
                sentence_num += 1
            embeddings.append(sentence_num % 2 )
        batch_embeddings.append(embeddings )
    return torch.tensor(batch_embeddings )
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCamelCase__ = logging.get_logger(__name__)
lowerCamelCase__ = {
"""huggingface/autoformer-tourism-monthly""": """https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json""",
}
class A__ ( __magic_name__ ):
    """Configuration for the Autoformer time-series transformer.

    NOTE(review): an automated rename damaged this class — the base class is
    the undefined `__magic_name__` (presumably PretrainedConfig), the two
    class attributes (presumably `model_type` and `attribute_map`) are both
    `lowercase`, every `__init__` parameter shares the identifier `a` (a
    SyntaxError), and each configuration value is bound to a throwaway local
    instead of a `self.` attribute while later lines read the lost names
    (`self.cardinality`, `self.lags_sequence`, ...). Restore against
    upstream before use.
    """
    lowercase = 'autoformer'
    lowercase = {
        'hidden_size': 'd_model',
        'num_attention_heads': 'encoder_attention_heads',
        'num_hidden_layers': 'encoder_layers',
    }
    def __init__( self : Optional[int] , a : Optional[int] = None , a : Optional[int] = None , a : str = "student_t" , a : str = "nll" , a : int = 1 , a : List[int] = [1, 2, 3, 4, 5, 6, 7] , a : bool = True , a : int = 0 , a : int = 0 , a : int = 0 , a : int = 0 , a : Optional[List[int]] = None , a : Optional[List[int]] = None , a : int = 64 , a : int = 2 , a : int = 2 , a : int = 2 , a : int = 2 , a : int = 32 , a : int = 32 , a : str = "gelu" , a : float = 0.1 , a : float = 0.1 , a : float = 0.1 , a : float = 0.1 , a : float = 0.1 , a : int = 100 , a : float = 0.0_2 , a : bool = True , a : Optional[Any]=True , a : int = 10 , a : int = 25 , a : int = 3 , **a : Optional[Any] , ):
        '''simple docstring'''
        # Time-series-specific configuration.
        lowerCAmelCase__ : List[str] = prediction_length
        lowerCAmelCase__ : Tuple = context_length if context_length is not None else prediction_length
        lowerCAmelCase__ : Optional[int] = distribution_output
        lowerCAmelCase__ : Union[str, Any] = loss
        lowerCAmelCase__ : Optional[Any] = input_size
        lowerCAmelCase__ : Tuple = num_time_features
        lowerCAmelCase__ : Optional[Any] = lags_sequence
        lowerCAmelCase__ : Union[str, Any] = scaling
        lowerCAmelCase__ : List[str] = num_dynamic_real_features
        lowerCAmelCase__ : Dict = num_static_real_features
        lowerCAmelCase__ : Dict = num_static_categorical_features
        # Cardinality / embedding dimension must align with the number of
        # static categorical features when any are declared.
        if cardinality is not None and num_static_categorical_features > 0:
            if len(a ) != num_static_categorical_features:
                raise ValueError(
                    'The cardinality should be a list of the same length as `num_static_categorical_features`' )
            lowerCAmelCase__ : List[Any] = cardinality
        else:
            lowerCAmelCase__ : Tuple = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(a ) != num_static_categorical_features:
                raise ValueError(
                    'The embedding dimension should be a list of the same length as `num_static_categorical_features`' )
            lowerCAmelCase__ : str = embedding_dimension
        else:
            lowerCAmelCase__ : List[str] = [min(50 , (cat + 1) // 2 ) for cat in self.cardinality]
        lowerCAmelCase__ : Any = num_parallel_samples
        # Transformer architecture configuration
        lowerCAmelCase__ : Optional[Any] = input_size * len(self.lags_sequence ) + self._number_of_features
        lowerCAmelCase__ : List[str] = d_model
        lowerCAmelCase__ : Union[str, Any] = encoder_attention_heads
        lowerCAmelCase__ : Optional[Any] = decoder_attention_heads
        lowerCAmelCase__ : Union[str, Any] = encoder_ffn_dim
        lowerCAmelCase__ : Optional[Any] = decoder_ffn_dim
        lowerCAmelCase__ : Optional[Any] = encoder_layers
        lowerCAmelCase__ : Optional[Any] = decoder_layers
        lowerCAmelCase__ : Optional[int] = dropout
        lowerCAmelCase__ : Any = attention_dropout
        lowerCAmelCase__ : str = activation_dropout
        lowerCAmelCase__ : Optional[int] = encoder_layerdrop
        lowerCAmelCase__ : List[Any] = decoder_layerdrop
        lowerCAmelCase__ : str = activation_function
        lowerCAmelCase__ : Tuple = init_std
        lowerCAmelCase__ : int = use_cache
        # Autoformer
        lowerCAmelCase__ : int = label_length
        lowerCAmelCase__ : Any = moving_average
        lowerCAmelCase__ : Tuple = autocorrelation_factor
        super().__init__(is_encoder_decoder=a , **a )
    @property
    def _lowerCamelCase ( self : Optional[Any] ):
        '''simple docstring'''
        # Presumably `_number_of_features`: width of the per-timestep feature
        # vector fed to the transformer.
        return (
            sum(self.embedding_dimension )
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2 # the log1p(abs(loc)) and log(scale) features
        )
import inspect
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
lowerCamelCase__ = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
lowerCamelCase__ = direct_transformers_import(PATH_TO_TRANSFORMERS)
lowerCamelCase__ = transformers.models.auto.configuration_auto.CONFIG_MAPPING
# Regex pattern used to find the checkpoint mentioned in the docstring of `config_class`.
# For example, `[bert-base-uncased](https://huggingface.co/bert-base-uncased)`
lowerCamelCase__ = re.compile(r"""\[(.+?)\]\((https://huggingface\.co/.+?)\)""")
lowerCamelCase__ = {
"""DecisionTransformerConfig""",
"""EncoderDecoderConfig""",
"""MusicgenConfig""",
"""RagConfig""",
"""SpeechEncoderDecoderConfig""",
"""TimmBackboneConfig""",
"""VisionEncoderDecoderConfig""",
"""VisionTextDualEncoderConfig""",
"""LlamaConfig""",
}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ):
    """Return the checkpoint name referenced in *config_class*'s docstring, or None.

    A link counts only when it points at ``https://huggingface.co/<name>``
    for the same ``<name>`` shown as the link text.

    NOTE(review): locals were discarded into throwaway names and read back
    under the lost ones (`checkpoint`, `checkpoints`), and the module regex
    was referenced by its pre-obfuscation name `_re_checkpoint`; names
    restored and the regex inlined. The original ``-> List[str]`` annotation
    referenced an unimported name and was wrong (a single name or None is
    returned); removed.
    """
    checkpoint = None
    # source code of `config_class`
    config_source = inspect.getsource(SCREAMING_SNAKE_CASE_ )
    checkpoints = re.findall(r'\[(.+?)\]\((https://huggingface\.co/.+?)\)' , config_source )
    # Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
    # For example, `('bert-base-uncased', 'https://huggingface.co/bert-base-uncased')`
    for ckpt_name, ckpt_link in checkpoints:
        # allow the link to end with `/`
        if ckpt_link.endswith('/' ):
            ckpt_link = ckpt_link[:-1]
        # verify the checkpoint name corresponds to the checkpoint link
        if ckpt_link == f'''https://huggingface.co/{ckpt_name}''':
            checkpoint = ckpt_name
            break
    return checkpoint
def lowerCAmelCase__ ( ) -> int:
    """Raise ValueError listing config classes whose docstring lacks a valid checkpoint.

    NOTE(review): the accumulator list and the per-class locals are each
    discarded into throwaway names and read back under the lost ones
    (`configs_without_checkpoint`, `checkpoint`, `name`, `message`), so this
    raises NameError as written; `CONFIG_MAPPING`,
    `CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK` and
    `get_checkpoint_from_config_class` are likewise pre-obfuscation
    module-level names. Restore the bindings before use.
    """
    lowerCAmelCase__ : Union[str, Any] = []
    for config_class in list(CONFIG_MAPPING.values() ):
        # Skip deprecated models
        if "models.deprecated" in config_class.__module__:
            continue
        lowerCAmelCase__ : Union[str, Any] = get_checkpoint_from_config_class(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ : Optional[Any] = config_class.__name__
        if checkpoint is None and name not in CONFIG_CLASSES_TO_IGNORE_FOR_DOCSTRING_CHECKPOINT_CHECK:
            configs_without_checkpoint.append(SCREAMING_SNAKE_CASE_ )
    if len(SCREAMING_SNAKE_CASE_ ) > 0:
        lowerCAmelCase__ : List[str] = '\n'.join(sorted(SCREAMING_SNAKE_CASE_ ) )
        raise ValueError(F'''The following configurations don\'t contain any valid checkpoint:\n{message}''' )
if __name__ == "__main__":
    # NOTE(review): the checker above was renamed to `lowerCAmelCase__` but this
    # call site still used its old name `check_config_docstrings_have_checkpoints`
    # (NameError). Call the current binding instead.
    lowerCAmelCase__()
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
    """Project Euler 43 property: substrings d2d3d4..d8d9d10 divisible by 2,3,5,7,11,13,17.

    *num* is a 0-indexed sequence of the ten digits d1..d10 of a pandigital
    number.

    NOTE(review): the body read the parameter under the undefined name
    ``num`` and enumerated the parameter instead of the prime list (which was
    itself discarded into a throwaway local); restored.
    """
    num = SCREAMING_SNAKE_CASE_
    # d2d3d4 divisible by 2 <=> its last digit d4 is even
    if num[3] % 2 != 0:
        return False
    # d3d4d5 divisible by 3 <=> digit sum divisible by 3
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    # d4d5d6 divisible by 5 <=> d6 is 0 or 5
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
    """Sum all 0-9 pandigital numbers with the PE43 substring-divisibility property.

    NOTE(review): ``map`` received the whole tuple instead of ``str`` and the
    checker was called on the function argument instead of each permutation;
    restored. ``is_substring_divisible`` is the sibling checker above, whose
    def the automated rename left bound to a different name — this still
    raises NameError until that def is restored.
    """
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(SCREAMING_SNAKE_CASE_ ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
    # NOTE(review): `solution` is the pre-obfuscation name of the function
    # above (now bound to `lowerCAmelCase__`); this call raises NameError
    # until that def name is restored.
    print(F"""{solution() = }""")
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCamelCase__ = {
"""configuration_luke""": ["""LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LukeConfig"""],
"""tokenization_luke""": ["""LukeTokenizer"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""LUKE_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LukeForEntityClassification""",
"""LukeForEntityPairClassification""",
"""LukeForEntitySpanClassification""",
"""LukeForMultipleChoice""",
"""LukeForQuestionAnswering""",
"""LukeForSequenceClassification""",
"""LukeForTokenClassification""",
"""LukeForMaskedLM""",
"""LukeModel""",
"""LukePreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_luke import LUKE_PRETRAINED_CONFIG_ARCHIVE_MAP, LukeConfig
from .tokenization_luke import LukeTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_luke import (
LUKE_PRETRAINED_MODEL_ARCHIVE_LIST,
LukeForEntityClassification,
LukeForEntityPairClassification,
LukeForEntitySpanClassification,
LukeForMaskedLM,
LukeForMultipleChoice,
LukeForQuestionAnswering,
LukeForSequenceClassification,
LukeForTokenClassification,
LukeModel,
LukePreTrainedModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 | 1 |
import mpmath # for roots of unity
import numpy as np
class A__ :
    """Polynomial multiplication via a radix-2 FFT over roots of unity.

    NOTE(review): an automated rename damaged this class — both constructor
    parameters share the identifier `a` (a SyntaxError) and every
    attribute/local is bound to a throwaway name while later lines read the
    lost ones (`self.polyA`, `self.polyB`, `self.c_max_length`, `self.root`,
    `dft`, `new_dft`, `inverce_c`, ...). Even `np.loga` is presumably
    `np.log2`. The comments below document the visible intent; restore the
    bindings against upstream before use.
    """
    def __init__( self : Optional[Any] , a : Tuple=None , a : int=None ):
        '''simple docstring'''
        # Copy the two coefficient lists (low-order coefficient first).
        lowerCAmelCase__ : Tuple = list(poly_a or [0] )[:]
        lowerCAmelCase__ : List[Any] = list(poly_b or [0] )[:]
        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        lowerCAmelCase__ : Any = len(self.polyA )
        while self.polyB[-1] == 0:
            self.polyB.pop()
        lowerCAmelCase__ : Tuple = len(self.polyB )
        # Add 0 to make lengths equal a power of 2
        lowerCAmelCase__ : Tuple = int(
            2 ** np.ceil(np.loga(len(self.polyA ) + len(self.polyB ) - 1 ) ) )
        while len(self.polyA ) < self.c_max_length:
            self.polyA.append(0 )
        while len(self.polyB ) < self.c_max_length:
            self.polyB.append(0 )
        # A complex root used for the fourier transform
        lowerCAmelCase__ : List[Any] = complex(mpmath.root(x=1 , n=self.c_max_length , k=1 ) )
        # The product
        lowerCAmelCase__ : Optional[int] = self.__multiply()
    def _lowerCamelCase ( self : Tuple , a : Dict ):
        '''simple docstring'''
        # Presumably the private __dft(which): iterative decimation FFT of
        # polyA ('A') or polyB ('B'), returning the transformed row.
        lowerCAmelCase__ : Dict = [[x] for x in self.polyA] if which == 'A' else [[x] for x in self.polyB]
        # Corner case
        if len(a ) <= 1:
            return dft[0]
        #
        lowerCAmelCase__ : Dict = self.c_max_length // 2
        while next_ncol > 0:
            lowerCAmelCase__ : Optional[int] = [[] for i in range(a )]
            lowerCAmelCase__ : int = self.root**next_ncol
            # First half of next step
            lowerCAmelCase__ : Tuple = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(a ):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Second half of next step
            lowerCAmelCase__ : Optional[Any] = 1
            for j in range(self.c_max_length // (next_ncol * 2) ):
                for i in range(a ):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j] )
                current_root *= root
            # Update
            lowerCAmelCase__ : List[str] = new_dft
            lowerCAmelCase__ : Optional[int] = next_ncol // 2
        return dft[0]
    def _lowerCamelCase ( self : Tuple ):
        '''simple docstring'''
        # Presumably the private __multiply(): pointwise-multiply the two
        # DFTs, then run the inverse transform to recover the product's
        # real coefficients.
        lowerCAmelCase__ : Any = self.__dft('A' )
        lowerCAmelCase__ : str = self.__dft('B' )
        lowerCAmelCase__ : Tuple = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length )]]
        del dft_a
        del dft_b
        # Corner Case
        if len(inverce_c[0] ) <= 1:
            return inverce_c[0]
        # Inverse DFT
        lowerCAmelCase__ : List[str] = 2
        while next_ncol <= self.c_max_length:
            lowerCAmelCase__ : Optional[int] = [[] for i in range(a )]
            lowerCAmelCase__ : Union[str, Any] = self.root ** (next_ncol // 2)
            lowerCAmelCase__ : List[Any] = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol ):
                for i in range(next_ncol // 2 ):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2 )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root) )
                current_root *= root
            # Update
            lowerCAmelCase__ : Optional[int] = new_inverse_c
            next_ncol *= 2
        # Unpack
        lowerCAmelCase__ : Optional[int] = [round(x[0].real , 8 ) + round(x[0].imag , 8 ) * 1J for x in inverce_c]
        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c
    def __str__( self : Tuple ):
        '''simple docstring'''
        # Human-readable dump of A, B and their product as power series.
        lowerCAmelCase__ : Optional[Any] = 'A = ' + ' + '.join(
            f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyA[: self.len_A] ) )
        lowerCAmelCase__ : Optional[int] = 'B = ' + ' + '.join(
            f'''{coef}*x^{i}''' for coef, i in enumerate(self.polyB[: self.len_B] ) )
        lowerCAmelCase__ : Any = 'A*B = ' + ' + '.join(
            f'''{coef}*x^{i}''' for coef, i in enumerate(self.product ) )
        return f'''{a}\n{b}\n{c}'''
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
lowerCamelCase__ = {
"""configuration_chinese_clip""": [
"""CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""ChineseCLIPConfig""",
"""ChineseCLIPOnnxConfig""",
"""ChineseCLIPTextConfig""",
"""ChineseCLIPVisionConfig""",
],
"""processing_chinese_clip""": ["""ChineseCLIPProcessor"""],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = ["""ChineseCLIPFeatureExtractor"""]
lowerCamelCase__ = ["""ChineseCLIPImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCamelCase__ = [
"""CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ChineseCLIPModel""",
"""ChineseCLIPPreTrainedModel""",
"""ChineseCLIPTextModel""",
"""ChineseCLIPVisionModel""",
]
if TYPE_CHECKING:
from .configuration_chinese_clip import (
CHINESE_CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
ChineseCLIPConfig,
ChineseCLIPOnnxConfig,
ChineseCLIPTextConfig,
ChineseCLIPVisionConfig,
)
from .processing_chinese_clip import ChineseCLIPProcessor
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_chinese_clip import ChineseCLIPFeatureExtractor, ChineseCLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_chinese_clip import (
CHINESE_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
ChineseCLIPModel,
ChineseCLIPPreTrainedModel,
ChineseCLIPTextModel,
ChineseCLIPVisionModel,
)
else:
import sys
lowerCamelCase__ = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__) | 69 | 1 |
from __future__ import annotations
import unittest
from transformers import DebertaVaConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
TFDebertaVaModel,
)
class A__ :
def __init__( self : str , a : Optional[Any] , a : List[str]=13 , a : Union[str, Any]=7 , a : Dict=True , a : List[Any]=True , a : Union[str, Any]=True , a : Tuple=True , a : List[Any]=99 , a : int=32 , a : Optional[Any]=2 , a : Optional[Any]=4 , a : List[str]=37 , a : List[Any]="gelu" , a : Union[str, Any]=0.1 , a : List[str]=0.1 , a : Any=512 , a : List[Any]=16 , a : int=2 , a : List[Any]=0.0_2 , a : Tuple=False , a : List[str]=True , a : Optional[Any]="None" , a : List[Any]=3 , a : Any=4 , a : List[str]=None , ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = parent
lowerCAmelCase__ : Optional[int] = batch_size
lowerCAmelCase__ : Tuple = seq_length
lowerCAmelCase__ : List[Any] = is_training
lowerCAmelCase__ : str = use_input_mask
lowerCAmelCase__ : Dict = use_token_type_ids
lowerCAmelCase__ : Optional[int] = use_labels
lowerCAmelCase__ : Any = vocab_size
lowerCAmelCase__ : Optional[int] = hidden_size
lowerCAmelCase__ : int = num_hidden_layers
lowerCAmelCase__ : Optional[int] = num_attention_heads
lowerCAmelCase__ : Tuple = intermediate_size
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : List[str] = hidden_dropout_prob
lowerCAmelCase__ : Dict = attention_probs_dropout_prob
lowerCAmelCase__ : Optional[int] = max_position_embeddings
lowerCAmelCase__ : str = type_vocab_size
lowerCAmelCase__ : int = type_sequence_label_size
lowerCAmelCase__ : Any = initializer_range
lowerCAmelCase__ : Optional[int] = num_labels
lowerCAmelCase__ : Optional[int] = num_choices
lowerCAmelCase__ : str = relative_attention
lowerCAmelCase__ : Any = position_biased_input
lowerCAmelCase__ : Tuple = pos_att_type
lowerCAmelCase__ : Dict = scope
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Union[str, Any] = None
if self.use_input_mask:
lowerCAmelCase__ : Tuple = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[str] = None
if self.use_token_type_ids:
lowerCAmelCase__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
lowerCAmelCase__ : Any = None
lowerCAmelCase__ : Union[str, Any] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : Optional[int] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : Any = DebertaVaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , initializer_range=self.initializer_range , return_dict=a , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : Optional[int] , a : str , a : Tuple , a : List[str] , a : List[str] , a : Tuple , a : Optional[int] , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = TFDebertaVaModel(config=a )
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
lowerCAmelCase__ : int = [input_ids, input_mask]
lowerCAmelCase__ : Any = model(a )
lowerCAmelCase__ : Dict = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : Optional[Any] , a : Dict , a : Optional[int] , a : List[Any] , a : Union[str, Any] , a : Optional[int] , a : Tuple , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = TFDebertaVaForMaskedLM(config=a )
lowerCAmelCase__ : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : List[Any] , a : Union[str, Any] , a : Optional[Any] , a : Tuple , a : Union[str, Any] , a : List[str] , a : List[str] , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = self.num_labels
lowerCAmelCase__ : List[Any] = TFDebertaVaForSequenceClassification(config=a )
lowerCAmelCase__ : Any = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase__ : Tuple = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Tuple , a : int , a : str , a : int , a : int , a : Tuple , a : Tuple , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.num_labels
lowerCAmelCase__ : Any = TFDebertaVaForTokenClassification(config=a )
lowerCAmelCase__ : List[str] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase__ : int = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : Optional[Any] , a : Optional[int] , a : Tuple , a : Tuple , a : Any , a : str , a : List[str] , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = TFDebertaVaForQuestionAnswering(config=a )
lowerCAmelCase__ : Tuple = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
lowerCAmelCase__ : Union[str, Any] = model(a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.prepare_config_and_inputs()
(
(
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) , (
lowerCAmelCase__
) ,
) : Tuple = config_and_inputs
lowerCAmelCase__ : Any = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
TFDebertaVaModel,
TFDebertaVaForMaskedLM,
TFDebertaVaForQuestionAnswering,
TFDebertaVaForSequenceClassification,
TFDebertaVaForTokenClassification,
)
if is_tf_available()
else ()
)
lowercase = (
{
'feature-extraction': TFDebertaVaModel,
'fill-mask': TFDebertaVaForMaskedLM,
'question-answering': TFDebertaVaForQuestionAnswering,
'text-classification': TFDebertaVaForSequenceClassification,
'token-classification': TFDebertaVaForTokenClassification,
'zero-shot': TFDebertaVaForSequenceClassification,
}
if is_tf_available()
else {}
)
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Dict = TFDebertaVaModelTester(self )
lowerCAmelCase__ : Union[str, Any] = ConfigTester(self , config_class=a , hidden_size=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*a )
@slow
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
self.assertIsNotNone(a )
@require_tf
class A__ ( unittest.TestCase ):
@unittest.skip(reason='Model not available yet' )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
pass
@slow
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = TFDebertaVaModel.from_pretrained('kamalkraj/deberta-v2-xlarge' )
lowerCAmelCase__ : Dict = tf.constant([[0, 31_414, 232, 328, 740, 1_140, 12_695, 69, 46_078, 1_588, 2]] )
lowerCAmelCase__ : int = tf.constant([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
lowerCAmelCase__ : int = model(a , attention_mask=a )[0]
lowerCAmelCase__ : List[str] = tf.constant(
[[[0.2_3_5_6, 0.1_9_4_8, 0.0_3_6_9], [-0.1_0_6_3, 0.3_5_8_6, -0.5_1_5_2], [-0.6_3_9_9, -0.0_2_5_9, -0.2_5_2_5]]] )
tf.debugging.assert_near(output[:, 1:4, 1:4] , a , atol=1E-4 ) | 69 |
import logging
import os
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List, Optional
import faiss
import torch
from datasets import Features, Sequence, Value, load_dataset
from transformers import DPRContextEncoder, DPRContextEncoderTokenizerFast, HfArgumentParser
lowerCamelCase__ = logging.getLogger(__name__)
torch.set_grad_enabled(False)
lowerCamelCase__ = """cuda""" if torch.cuda.is_available() else """cpu"""
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=100 , SCREAMING_SNAKE_CASE_=" " ) -> List[str]:
lowerCAmelCase__ : Optional[Any] = text.split(SCREAMING_SNAKE_CASE_ )
return [character.join(text[i : i + n] ).strip() for i in range(0 , len(SCREAMING_SNAKE_CASE_ ) , SCREAMING_SNAKE_CASE_ )]
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ , lowerCAmelCase__ : int = [], []
for title, text in zip(documents['title'] , documents['text'] ):
if text is not None:
for passage in split_text(SCREAMING_SNAKE_CASE_ ):
titles.append(title if title is not None else '' )
texts.append(SCREAMING_SNAKE_CASE_ )
return {"title": titles, "text": texts}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> dict:
lowerCAmelCase__ : List[str] = ctx_tokenizer(
documents['title'] , documents['text'] , truncation=SCREAMING_SNAKE_CASE_ , padding='longest' , return_tensors='pt' )['input_ids']
lowerCAmelCase__ : Tuple = ctx_encoder(input_ids.to(device=SCREAMING_SNAKE_CASE_ ) , return_dict=SCREAMING_SNAKE_CASE_ ).pooler_output
return {"embeddings": embeddings.detach().cpu().numpy()}
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , ) -> Optional[Any]:
######################################
logger.info('Step 1 - Create the dataset' )
######################################
# The dataset needed for RAG must have three columns:
# - title (string): title of the document
# - text (string): text of a passage of the document
# - embeddings (array of dimension d): DPR representation of the passage
# Let's say you have documents in tab-separated csv files with columns "title" and "text"
assert os.path.isfile(rag_example_args.csv_path ), "Please provide a valid path to a csv file"
# You can load a Dataset object this way
lowerCAmelCase__ : str = load_dataset(
'csv' , data_files=[rag_example_args.csv_path] , split='train' , delimiter='\t' , column_names=['title', 'text'] )
# More info about loading csv files in the documentation: https://huggingface.co/docs/datasets/loading_datasets.html?highlight=csv#csv-files
# Then split the documents into passages of 100 words
lowerCAmelCase__ : Optional[Any] = dataset.map(SCREAMING_SNAKE_CASE_ , batched=SCREAMING_SNAKE_CASE_ , num_proc=processing_args.num_proc )
# And compute the embeddings
lowerCAmelCase__ : List[str] = DPRContextEncoder.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name ).to(device=SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : str = DPRContextEncoderTokenizerFast.from_pretrained(rag_example_args.dpr_ctx_encoder_model_name )
lowerCAmelCase__ : List[Any] = Features(
{'text': Value('string' ), 'title': Value('string' ), 'embeddings': Sequence(Value('float32' ) )} ) # optional, save as float32 instead of float64 to save space
lowerCAmelCase__ : List[Any] = dataset.map(
partial(SCREAMING_SNAKE_CASE_ , ctx_encoder=SCREAMING_SNAKE_CASE_ , ctx_tokenizer=SCREAMING_SNAKE_CASE_ ) , batched=SCREAMING_SNAKE_CASE_ , batch_size=processing_args.batch_size , features=SCREAMING_SNAKE_CASE_ , )
# And finally save your dataset
lowerCAmelCase__ : Optional[Any] = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset' )
dataset.save_to_disk(SCREAMING_SNAKE_CASE_ )
# from datasets import load_from_disk
# dataset = load_from_disk(passages_path) # to reload the dataset
######################################
logger.info('Step 2 - Index the dataset' )
######################################
# Let's use the Faiss implementation of HNSW for fast approximate nearest neighbor search
lowerCAmelCase__ : Optional[int] = faiss.IndexHNSWFlat(index_hnsw_args.d , index_hnsw_args.m , faiss.METRIC_INNER_PRODUCT )
dataset.add_faiss_index('embeddings' , custom_index=SCREAMING_SNAKE_CASE_ )
# And save the index
lowerCAmelCase__ : str = os.path.join(rag_example_args.output_dir , 'my_knowledge_dataset_hnsw_index.faiss' )
dataset.get_index('embeddings' ).save(SCREAMING_SNAKE_CASE_ )
# dataset.load_faiss_index("embeddings", index_path) # to reload the index
@dataclass
class A__ :
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' / 'my_knowledge_dataset.csv' ) , metadata={'help': 'Path to a tab-separated csv file with columns \'title\' and \'text\''} , )
lowercase = field(
default=__magic_name__ , metadata={'help': 'Question that is passed as input to RAG. Default is \'What does Moses\' rod turn into ?\'.'} , )
lowercase = field(
default='facebook/rag-sequence-nq' , metadata={'help': 'The RAG model to use. Either \'facebook/rag-sequence-nq\' or \'facebook/rag-token-nq\''} , )
lowercase = field(
default='facebook/dpr-ctx_encoder-multiset-base' , metadata={
'help': (
'The DPR context encoder model to use. Either \'facebook/dpr-ctx_encoder-single-nq-base\' or'
' \'facebook/dpr-ctx_encoder-multiset-base\''
)
} , )
lowercase = field(
default=str(Path(__magic_name__ ).parent / 'test_run' / 'dummy-kb' ) , metadata={'help': 'Path to a directory where the dataset passages and the index will be saved'} , )
@dataclass
class A__ :
lowercase = field(
default=__magic_name__ , metadata={
'help': 'The number of processes to use to split the documents into passages. Default is single process.'
} , )
lowercase = field(
default=16 , metadata={
'help': 'The batch size to use when computing the passages embeddings using the DPR context encoder.'
} , )
@dataclass
class A__ :
lowercase = field(
default=768 , metadata={'help': 'The dimension of the embeddings to pass to the HNSW Faiss index.'} , )
lowercase = field(
default=128 , metadata={
'help': (
'The number of bi-directional links created for every new element during the HNSW index construction.'
)
} , )
if __name__ == "__main__":
logging.basicConfig(level=logging.WARNING)
logger.setLevel(logging.INFO)
lowerCamelCase__ = HfArgumentParser((RagExampleArguments, ProcessingArguments, IndexHnswArguments))
lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ = parser.parse_args_into_dataclasses()
with TemporaryDirectory() as tmp_dir:
lowerCamelCase__ = rag_example_args.output_dir or tmp_dir
main(rag_example_args, processing_args, index_hnsw_args) | 69 | 1 |
import requests
from bsa import BeautifulSoup
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = "https://www.worldometers.info/coronavirus" ) -> dict:
lowerCAmelCase__ : List[str] = BeautifulSoup(requests.get(SCREAMING_SNAKE_CASE_ ).text , 'html.parser' )
lowerCAmelCase__ : int = soup.findAll('h1' )
lowerCAmelCase__ : Optional[int] = soup.findAll('div' , {'class': 'maincounter-number'} )
keys += soup.findAll('span' , {'class': 'panel-title'} )
values += soup.findAll('div' , {'class': 'number-table-main'} )
return {key.text.strip(): value.text.strip() for key, value in zip(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )}
if __name__ == "__main__":
print("""\033[1m""" + """COVID-19 Status of the World""" + """\033[0m\n""")
for key, value in world_covidaa_stats().items():
print(F"""{key}\n{value}\n""") | 69 |
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class A__ ( __magic_name__ ):
lowercase = (DDPMParallelScheduler,)
def _lowerCamelCase ( self : str , **a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : str = {
'num_train_timesteps': 1_000,
'beta_start': 0.0_0_0_1,
'beta_end': 0.0_2,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**a )
return config
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1_000]:
self.check_over_configs(num_train_timesteps=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=a , beta_end=a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=a )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
self.check_over_configs(thresholding=a )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=a , prediction_type=a , sample_max_value=a , )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=a )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : List[str] = scheduler_class(**a )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0_9_7_9 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.0_2 ) ) < 1E-5
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Tuple = self.dummy_model()
lowerCAmelCase__ : Optional[Any] = self.dummy_sample_deter
lowerCAmelCase__ : int = self.dummy_sample_deter + 0.1
lowerCAmelCase__ : Union[str, Any] = self.dummy_sample_deter - 0.1
lowerCAmelCase__ : Tuple = samplea.shape[0]
lowerCAmelCase__ : List[Any] = torch.stack([samplea, samplea, samplea] , dim=0 )
lowerCAmelCase__ : Optional[Any] = torch.arange(a )[0:3, None].repeat(1 , a )
lowerCAmelCase__ : List[str] = model(samples.flatten(0 , 1 ) , timesteps.flatten(0 , 1 ) )
lowerCAmelCase__ : Tuple = scheduler.batch_step_no_noise(a , timesteps.flatten(0 , 1 ) , samples.flatten(0 , 1 ) )
lowerCAmelCase__ : str = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 1_1_5_3.1_8_3_3 ) < 1E-2
assert abs(result_mean.item() - 0.5_0_0_5 ) < 1E-3
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : List[Any] = self.get_scheduler_config()
lowerCAmelCase__ : Dict = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Any = self.dummy_model()
lowerCAmelCase__ : int = self.dummy_sample_deter
lowerCAmelCase__ : Tuple = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : Optional[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : int = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : List[str] = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Optional[Any] = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_5_8.9_6_0_6 ) < 1E-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1E-3
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config(prediction_type='v_prediction' )
lowerCAmelCase__ : int = scheduler_class(**a )
lowerCAmelCase__ : str = len(a )
lowerCAmelCase__ : Optional[int] = self.dummy_model()
lowerCAmelCase__ : List[str] = self.dummy_sample_deter
lowerCAmelCase__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(a ) ):
# 1. predict noise residual
lowerCAmelCase__ : List[Any] = model(a , a )
# 2. predict previous mean of sample x_t-1
lowerCAmelCase__ : Optional[int] = scheduler.step(a , a , a , generator=a ).prev_sample
lowerCAmelCase__ : str = pred_prev_sample
lowerCAmelCase__ : Optional[int] = torch.sum(torch.abs(a ) )
lowerCAmelCase__ : Any = torch.mean(torch.abs(a ) )
assert abs(result_sum.item() - 2_0_2.0_2_9_6 ) < 1E-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1E-3
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Any = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : Optional[Any] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=a )
lowerCAmelCase__ : List[Any] = scheduler.timesteps
for i, timestep in enumerate(a ):
if i == len(a ) - 1:
lowerCAmelCase__ : Tuple = -1
else:
lowerCAmelCase__ : Dict = timesteps[i + 1]
lowerCAmelCase__ : str = scheduler.previous_timestep(a )
lowerCAmelCase__ : int = prev_t.item()
self.assertEqual(a , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : Optional[int] = self.get_scheduler_config()
lowerCAmelCase__ : Optional[Any] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 51, 0]
with self.assertRaises(a , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.scheduler_classes[0]
lowerCAmelCase__ : str = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [100, 87, 50, 1, 0]
lowerCAmelCase__ : int = len(a )
with self.assertRaises(a , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=a , timesteps=a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.scheduler_classes[0]
lowerCAmelCase__ : Dict = self.get_scheduler_config()
lowerCAmelCase__ : Optional[int] = scheduler_class(**a )
lowerCAmelCase__ : str = [scheduler.config.num_train_timesteps]
with self.assertRaises(
a , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=a ) | 69 | 1 |
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> FocalNetConfig:
    """Build a ``FocalNetConfig`` from a checkpoint name such as ``focalnet-tiny-lrf``.

    Args:
        SCREAMING_SNAKE_CASE_: model name string encoding the size
            (tiny/small/base/large/xlarge/huge) and variant (``lrf``, ``fl3``, ``fl4``).

    Returns:
        A populated ``FocalNetConfig`` with ImageNet label mappings attached.
    """
    model_name = SCREAMING_SNAKE_CASE_
    # Tiny models use a shallower third stage; all other sizes use 18 blocks there.
    depths = [2, 2, 6, 2] if "tiny" in model_name else [2, 2, 18, 2]
    # Large/xlarge/huge checkpoints were trained with convolutional patch embedding,
    # post-layernorm and layerscale enabled ("xlarge" contains "large", so it matches too).
    use_conv_embed = "large" in model_name or "huge" in model_name
    use_post_layernorm = "large" in model_name or "huge" in model_name
    use_layerscale = "large" in model_name or "huge" in model_name

    if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
        if "fl3" in model_name:
            focal_levels = [3, 3, 3, 3]
            focal_windows = [5, 5, 5, 5]
        elif "fl4" in model_name:
            focal_levels = [4, 4, 4, 4]
            focal_windows = [3, 3, 3, 3]

    if "tiny" in model_name or "small" in model_name or "base" in model_name:
        focal_windows = [3, 3, 3, 3]
        # "lrf" (large receptive field) variants use one extra focal level.
        focal_levels = [3, 3, 3, 3] if "lrf" in model_name else [2, 2, 2, 2]

    # NOTE: "xlarge" must be tested BEFORE "large" — "xlarge" contains the substring
    # "large", so an `elif "large"` placed first would wrongly map xlarge to 192.
    if "tiny" in model_name:
        embed_dim = 96
    elif "small" in model_name:
        embed_dim = 96
    elif "base" in model_name:
        embed_dim = 128
    elif "xlarge" in model_name:
        embed_dim = 256
    elif "large" in model_name:
        embed_dim = 192
    elif "huge" in model_name:
        embed_dim = 352

    # Label maps: large/huge checkpoints were trained on ImageNet-22k, rest on 1k.
    repo_id = "huggingface/label-files"
    if "large" in model_name or "huge" in model_name:
        filename = "imagenet-22k-id2label.json"
    else:
        filename = "imagenet-1k-id2label.json"
    with open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r") as f:
        idalabel = json.load(f)
    idalabel = {int(k): v for k, v in idalabel.items()}
    labelaid = {v: k for k, v in idalabel.items()}

    # Use the real PretrainedConfig kwarg names `id2label`/`label2id`; unknown kwargs
    # are silently stored as plain attributes and the label maps would never be used.
    config = FocalNetConfig(
        embed_dim=embed_dim,
        depths=depths,
        focal_levels=focal_levels,
        focal_windows=focal_windows,
        use_conv_embed=use_conv_embed,
        id2label=idalabel,
        label2id=labelaid,
        use_post_layernorm=use_post_layernorm,
        use_layerscale=use_layerscale,
    )
    return config
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> str:
    """Translate an original FocalNet state-dict key into its HF Transformers name.

    Args:
        SCREAMING_SNAKE_CASE_: parameter name from the original checkpoint
            (e.g. ``layers.0.blocks.1.modulation.f.weight``).

    Returns:
        The renamed key expected by ``FocalNetForImageClassification``.
    """
    # Thread every replacement through `name`; the original assigned each rename to
    # a throwaway variable while reading an unbound name, discarding all renames.
    name = SCREAMING_SNAKE_CASE_
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "encoder.layers" in name:
        name = name.replace("encoder.layers", "encoder.stages")
    if "downsample.proj" in name:
        name = name.replace("downsample.proj", "downsample.projection")
    if "blocks" in name:
        name = name.replace("blocks", "layers")
    # Focal modulation sub-layers: f -> projection_in, h -> projection_context,
    # proj -> projection_out (checked via .weight/.bias so "focal_layers" keys
    # are not accidentally rewritten).
    if "modulation.f.weight" in name or "modulation.f.bias" in name:
        name = name.replace("modulation.f", "modulation.projection_in")
    if "modulation.h.weight" in name or "modulation.h.bias" in name:
        name = name.replace("modulation.h", "modulation.projection_context")
    if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
        name = name.replace("modulation.proj", "modulation.projection_out")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    # Classifier head keeps no "focalnet." prefix; everything else is namespaced
    # under the backbone.
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "focalnet." + name
    return name
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_=False ) -> List[Any]:
    """Download an original FocalNet checkpoint, convert it to HF format and verify it.

    Positional arguments are (model_name, pytorch_dump_folder_path, push_to_hub).

    NOTE(review): many intermediate results are assigned to ``lowerCAmelCase__``
    but later read under names such as ``state_dict``, ``model``, ``processor``,
    ``inputs``, ``image_transforms``, ``outputs`` and ``expected_slice`` that are
    never bound in this body -- confirm the intended variable names before running.
    """
    # Map of supported model names to their original checkpoint URLs.
    # fmt: off
    lowerCAmelCase__ : str = {
        'focalnet-tiny': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth',
        'focalnet-tiny-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth',
        'focalnet-small': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth',
        'focalnet-small-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth',
        'focalnet-base': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth',
        'focalnet-base-lrf': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth',
        'focalnet-large-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth',
        'focalnet-large-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth',
        'focalnet-xlarge-lrf-fl3': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth',
        'focalnet-xlarge-lrf-fl4': 'https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth',
    }
    # fmt: on
    lowerCAmelCase__ : Optional[Any] = model_name_to_url[model_name]
    print('Checkpoint URL: ' , SCREAMING_SNAKE_CASE_ )
    # Download the original checkpoint; the weights live under the 'model' key.
    lowerCAmelCase__ : Dict = torch.hub.load_state_dict_from_url(SCREAMING_SNAKE_CASE_ , map_location='cpu' )['model']
    # rename keys
    # Iterate over a copy since the dict is mutated (pop + re-insert) in the loop.
    for key in state_dict.copy().keys():
        lowerCAmelCase__ : List[Any] = state_dict.pop(SCREAMING_SNAKE_CASE_ )
        lowerCAmelCase__ : Optional[int] = val
    # Build the HF config for this variant and instantiate the model in eval mode.
    lowerCAmelCase__ : Any = get_focalnet_config(SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : Tuple = FocalNetForImageClassification(SCREAMING_SNAKE_CASE_ )
    model.eval()
    # load state dict
    model.load_state_dict(SCREAMING_SNAKE_CASE_ )
    # verify conversion
    # Standard COCO test image used across HF conversion scripts.
    lowerCAmelCase__ : Tuple = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    lowerCAmelCase__ : Any = BitImageProcessor(
        do_resize=SCREAMING_SNAKE_CASE_ , size={'shortest_edge': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=SCREAMING_SNAKE_CASE_ , crop_size=224 , do_normalize=SCREAMING_SNAKE_CASE_ , image_mean=SCREAMING_SNAKE_CASE_ , image_std=SCREAMING_SNAKE_CASE_ , )
    lowerCAmelCase__ : Optional[int] = Image.open(requests.get(SCREAMING_SNAKE_CASE_ , stream=SCREAMING_SNAKE_CASE_ ).raw )
    lowerCAmelCase__ : Dict = processor(images=SCREAMING_SNAKE_CASE_ , return_tensors='pt' )
    # Reference torchvision pipeline: the processor output is checked against it below.
    lowerCAmelCase__ : int = transforms.Compose(
        [
            transforms.Resize(256 ),
            transforms.CenterCrop(224 ),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
        ] )
    lowerCAmelCase__ : str = image_transforms(SCREAMING_SNAKE_CASE_ ).unsqueeze(0 )
    # verify pixel_values
    assert torch.allclose(inputs.pixel_values , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
    # Forward pass and top-1 prediction for a quick sanity check.
    lowerCAmelCase__ : Optional[Any] = model(**SCREAMING_SNAKE_CASE_ )
    lowerCAmelCase__ : Dict = outputs.logits.argmax(-1 ).item()
    print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
    print('First values of logits:' , outputs.logits[0, :3] )
    # Expected first three logits per variant, taken from the original models.
    # NOTE(review): no branch exists for the large/xlarge names present in the
    # URL map above, so the allclose check below would fail with an unbound
    # name for those variants -- confirm whether that is intentional.
    if model_name == "focalnet-tiny":
        lowerCAmelCase__ : Any = torch.tensor([0.2166, -0.4368, 0.2191] )
    elif model_name == "focalnet-tiny-lrf":
        lowerCAmelCase__ : Tuple = torch.tensor([1.1669, 0.0125, -0.1695] )
    elif model_name == "focalnet-small":
        lowerCAmelCase__ : int = torch.tensor([0.4917, -0.0430, 0.1341] )
    elif model_name == "focalnet-small-lrf":
        lowerCAmelCase__ : Dict = torch.tensor([-0.2588, -0.5342, -0.2331] )
    elif model_name == "focalnet-base":
        lowerCAmelCase__ : Union[str, Any] = torch.tensor([-0.1655, -0.4090, -0.1730] )
    elif model_name == "focalnet-base-lrf":
        lowerCAmelCase__ : List[str] = torch.tensor([0.5306, -0.0483, -0.3928] )
    assert torch.allclose(outputs.logits[0, :3] , SCREAMING_SNAKE_CASE_ , atol=1e-4 )
    print('Looks ok!' )
    # Optionally persist the converted model + processor locally.
    if pytorch_dump_folder_path is not None:
        print(F'''Saving model and processor of {model_name} to {pytorch_dump_folder_path}''' )
        model.save_pretrained(SCREAMING_SNAKE_CASE_ )
        processor.save_pretrained(SCREAMING_SNAKE_CASE_ )
    # Optionally push both artifacts to the Hugging Face Hub under model_name.
    if push_to_hub:
        print(F'''Pushing model and processor of {model_name} to the hub...''' )
        model.push_to_hub(F'''{model_name}''' )
        processor.push_to_hub(F'''{model_name}''' )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--model_name""",
default="""focalnet-tiny""",
type=str,
help="""Name of the FocalNet model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub.""",
)
lowerCamelCase__ = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub) | 69 |
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class A__ ( __magic_name__ ):
lowercase = ['image_processor', 'tokenizer']
lowercase = 'LayoutLMv3ImageProcessor'
lowercase = ('LayoutLMv3Tokenizer', 'LayoutLMv3TokenizerFast')
def __init__( self : Optional[int] , a : Union[str, Any]=None , a : Optional[Any]=None , **a : str ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = None
if "feature_extractor" in kwargs:
warnings.warn(
'The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`'
' instead.' , a , )
lowerCAmelCase__ : int = kwargs.pop('feature_extractor' )
lowerCAmelCase__ : Dict = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError('You need to specify an `image_processor`.' )
if tokenizer is None:
raise ValueError('You need to specify a `tokenizer`.' )
super().__init__(a , a )
def __call__( self : List[Any] , a : List[Any] , a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None , a : Optional[Union[PreTokenizedInput, List[PreTokenizedInput]]] = None , a : Union[List[List[int]], List[List[List[int]]]] = None , a : Optional[Union[List[int], List[List[int]]]] = None , a : bool = True , a : Union[bool, str, PaddingStrategy] = False , a : Union[bool, str, TruncationStrategy] = None , a : Optional[int] = None , a : int = 0 , a : Optional[int] = None , a : Optional[bool] = None , a : Optional[bool] = None , a : bool = False , a : bool = False , a : bool = False , a : bool = False , a : bool = True , a : Optional[Union[str, TensorType]] = None , **a : str , ):
'''simple docstring'''
if self.image_processor.apply_ocr and (boxes is not None):
raise ValueError(
'You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True.' )
if self.image_processor.apply_ocr and (word_labels is not None):
raise ValueError(
'You cannot provide word labels if you initialized the image processor with apply_ocr set to True.' )
# first, apply the image processor
lowerCAmelCase__ : List[str] = self.image_processor(images=a , return_tensors=a )
# second, apply the tokenizer
if text is not None and self.image_processor.apply_ocr and text_pair is None:
if isinstance(a , a ):
lowerCAmelCase__ : Optional[Any] = [text] # add batch dimension (as the image processor always adds a batch dimension)
lowerCAmelCase__ : List[str] = features['words']
lowerCAmelCase__ : List[Any] = self.tokenizer(
text=text if text is not None else features['words'] , text_pair=text_pair if text_pair is not None else None , boxes=boxes if boxes is not None else features['boxes'] , word_labels=a , add_special_tokens=a , padding=a , truncation=a , max_length=a , stride=a , pad_to_multiple_of=a , return_token_type_ids=a , return_attention_mask=a , return_overflowing_tokens=a , return_special_tokens_mask=a , return_offsets_mapping=a , return_length=a , verbose=a , return_tensors=a , **a , )
# add pixel values
lowerCAmelCase__ : Tuple = features.pop('pixel_values' )
if return_overflowing_tokens is True:
lowerCAmelCase__ : List[str] = self.get_overflowing_images(a , encoded_inputs['overflow_to_sample_mapping'] )
lowerCAmelCase__ : List[str] = images
return encoded_inputs
def _lowerCamelCase ( self : Any , a : List[str] , a : int ):
'''simple docstring'''
lowerCAmelCase__ : int = []
for sample_idx in overflow_to_sample_mapping:
images_with_overflow.append(images[sample_idx] )
if len(a ) != len(a ):
raise ValueError(
'Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got'
f''' {len(a )} and {len(a )}''' )
return images_with_overflow
def _lowerCamelCase ( self : Union[str, Any] , *a : Optional[Any] , **a : List[str] ):
'''simple docstring'''
return self.tokenizer.batch_decode(*a , **a )
def _lowerCamelCase ( self : Tuple , *a : List[str] , **a : Optional[Any] ):
'''simple docstring'''
return self.tokenizer.decode(*a , **a )
@property
def _lowerCamelCase ( self : int ):
'''simple docstring'''
return ["input_ids", "bbox", "attention_mask", "pixel_values"]
@property
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
warnings.warn(
'`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.' , a , )
return self.image_processor_class
@property
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
warnings.warn(
'`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.' , a , )
return self.image_processor | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> str:
if a < 0 or b < 0:
raise ValueError('the value of both inputs must be positive' )
lowerCAmelCase__ : Optional[Any] = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCAmelCase__ : Tuple = str(bin(SCREAMING_SNAKE_CASE_ ) )[2:] # remove the leading "0b"
lowerCAmelCase__ : List[str] = max(len(SCREAMING_SNAKE_CASE_ ) , len(SCREAMING_SNAKE_CASE_ ) )
return "0b" + "".join(
str(int(char_a != char_b ) )
for char_a, char_b in zip(a_binary.zfill(SCREAMING_SNAKE_CASE_ ) , b_binary.zfill(SCREAMING_SNAKE_CASE_ ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
import unittest
from transformers import SqueezeBertConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
SqueezeBertModel,
)
class A__ ( __magic_name__ ):
def __init__( self : List[str] , a : Optional[Any] , a : int=13 , a : str=7 , a : Any=True , a : List[str]=True , a : Any=False , a : List[Any]=True , a : List[str]=99 , a : Optional[Any]=32 , a : List[str]=5 , a : List[Any]=4 , a : List[Any]=64 , a : List[Any]="gelu" , a : List[Any]=0.1 , a : List[Any]=0.1 , a : int=512 , a : Tuple=16 , a : List[str]=2 , a : int=0.0_2 , a : Union[str, Any]=3 , a : Any=4 , a : Union[str, Any]=None , a : Union[str, Any]=2 , a : List[str]=2 , a : int=2 , a : Dict=2 , a : List[str]=4 , a : str=1 , ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = parent
lowerCAmelCase__ : int = batch_size
lowerCAmelCase__ : str = seq_length
lowerCAmelCase__ : Tuple = is_training
lowerCAmelCase__ : List[str] = use_input_mask
lowerCAmelCase__ : Optional[int] = use_token_type_ids
lowerCAmelCase__ : Any = use_labels
lowerCAmelCase__ : List[Any] = vocab_size
lowerCAmelCase__ : str = hidden_size
lowerCAmelCase__ : str = num_hidden_layers
lowerCAmelCase__ : List[str] = num_attention_heads
lowerCAmelCase__ : int = intermediate_size
lowerCAmelCase__ : Optional[int] = hidden_act
lowerCAmelCase__ : Optional[Any] = hidden_dropout_prob
lowerCAmelCase__ : Optional[Any] = attention_probs_dropout_prob
lowerCAmelCase__ : Union[str, Any] = max_position_embeddings
lowerCAmelCase__ : Optional[int] = type_vocab_size
lowerCAmelCase__ : Dict = type_sequence_label_size
lowerCAmelCase__ : Optional[int] = initializer_range
lowerCAmelCase__ : List[Any] = num_labels
lowerCAmelCase__ : Any = num_choices
lowerCAmelCase__ : str = scope
lowerCAmelCase__ : Any = q_groups
lowerCAmelCase__ : Any = k_groups
lowerCAmelCase__ : Union[str, Any] = v_groups
lowerCAmelCase__ : int = post_attention_groups
lowerCAmelCase__ : str = intermediate_groups
lowerCAmelCase__ : Union[str, Any] = output_groups
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
lowerCAmelCase__ : Tuple = None
if self.use_input_mask:
lowerCAmelCase__ : List[str] = random_attention_mask([self.batch_size, self.seq_length] )
lowerCAmelCase__ : List[Any] = None
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : Tuple = None
if self.use_labels:
lowerCAmelCase__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
lowerCAmelCase__ : int = ids_tensor([self.batch_size] , self.num_choices )
lowerCAmelCase__ : str = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowerCamelCase ( self : str ):
'''simple docstring'''
return SqueezeBertConfig(
embedding_size=self.hidden_size , vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , attention_probs_dropout_prob=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , q_groups=self.q_groups , k_groups=self.k_groups , v_groups=self.v_groups , post_attention_groups=self.post_attention_groups , intermediate_groups=self.intermediate_groups , output_groups=self.output_groups , )
def _lowerCamelCase ( self : Optional[int] , a : List[str] , a : List[str] , a : Any , a : Optional[int] , a : str , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = SqueezeBertModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(a , a )
lowerCAmelCase__ : Any = model(a )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowerCamelCase ( self : str , a : Any , a : Tuple , a : int , a : Union[str, Any] , a : Tuple , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = SqueezeBertForMaskedLM(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Any = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowerCamelCase ( self : Optional[int] , a : Union[str, Any] , a : Optional[Any] , a : str , a : Optional[Any] , a : str , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SqueezeBertForQuestionAnswering(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , start_positions=a , end_positions=a )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowerCamelCase ( self : Tuple , a : List[Any] , a : Optional[int] , a : Union[str, Any] , a : str , a : str , a : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForSequenceClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowerCamelCase ( self : Any , a : int , a : Any , a : Dict , a : Any , a : Tuple , a : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : str = self.num_labels
lowerCAmelCase__ : Dict = SqueezeBertForTokenClassification(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[Any] = model(a , attention_mask=a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowerCamelCase ( self : str , a : Optional[int] , a : List[Any] , a : int , a : List[Any] , a : Union[str, Any] , a : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = self.num_choices
lowerCAmelCase__ : Union[str, Any] = SqueezeBertForMultipleChoice(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : List[str] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[Any] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
lowerCAmelCase__ : List[str] = model(
a , attention_mask=a , labels=a , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.prepare_config_and_inputs()
((lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__) , (lowerCAmelCase__)) : List[Any] = config_and_inputs
lowerCAmelCase__ : Dict = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(
SqueezeBertModel,
SqueezeBertForMaskedLM,
SqueezeBertForMultipleChoice,
SqueezeBertForQuestionAnswering,
SqueezeBertForSequenceClassification,
SqueezeBertForTokenClassification,
)
if is_torch_available()
else None
)
lowercase = (
{
'feature-extraction': SqueezeBertModel,
'fill-mask': SqueezeBertForMaskedLM,
'question-answering': SqueezeBertForQuestionAnswering,
'text-classification': SqueezeBertForSequenceClassification,
'token-classification': SqueezeBertForTokenClassification,
'zero-shot': SqueezeBertForSequenceClassification,
}
if is_torch_available()
else {}
)
lowercase = False
lowercase = True
lowercase = False
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = SqueezeBertModelTester(self )
lowerCAmelCase__ : Dict = ConfigTester(self , config_class=a , dim=37 )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_model(*a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_masked_lm(*a )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_question_answering(*a )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_sequence_classification(*a )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_token_classification(*a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_squeezebert_for_multiple_choice(*a )
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
for model_name in SQUEEZEBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : Optional[int] = SqueezeBertModel.from_pretrained(a )
self.assertIsNotNone(a )
@require_sentencepiece
@require_tokenizers
@require_torch
class A__ ( unittest.TestCase ):
@slow
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : int = SqueezeBertForSequenceClassification.from_pretrained('squeezebert/squeezebert-mnli' )
lowerCAmelCase__ : str = torch.tensor([[1, 29_414, 232, 328, 740, 1_140, 12_695, 69, 13, 1_588, 2]] )
lowerCAmelCase__ : Any = model(a )[0]
lowerCAmelCase__ : Tuple = torch.Size((1, 3) )
self.assertEqual(output.shape , a )
lowerCAmelCase__ : int = torch.tensor([[0.6_4_0_1, -0.0_3_4_9, -0.6_0_4_1]] )
self.assertTrue(torch.allclose(a , a , atol=1E-4 ) ) | 69 | 1 |
from itertools import product
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> list[int]:
lowerCAmelCase__ : Dict = sides_number
lowerCAmelCase__ : Dict = max_face_number * dice_number
lowerCAmelCase__ : Optional[Any] = [0] * (max_total + 1)
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = range(SCREAMING_SNAKE_CASE_ , max_face_number + 1 )
for dice_numbers in product(SCREAMING_SNAKE_CASE_ , repeat=SCREAMING_SNAKE_CASE_ ):
lowerCAmelCase__ : List[str] = sum(SCREAMING_SNAKE_CASE_ )
totals_frequencies[total] += 1
return totals_frequencies
def lowerCAmelCase__ ( ) -> float:
lowerCAmelCase__ : Optional[int] = total_frequency_distribution(
sides_number=4 , dice_number=9 )
lowerCAmelCase__ : List[str] = total_frequency_distribution(
sides_number=6 , dice_number=6 )
lowerCAmelCase__ : Optional[Any] = 0
lowerCAmelCase__ : Any = 9
lowerCAmelCase__ : Tuple = 4 * 9
lowerCAmelCase__ : Union[str, Any] = 6
for peter_total in range(SCREAMING_SNAKE_CASE_ , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
lowerCAmelCase__ : Dict = (4**9) * (6**6)
lowerCAmelCase__ : Tuple = peter_wins_count / total_games_number
lowerCAmelCase__ : Optional[int] = round(SCREAMING_SNAKE_CASE_ , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 |
lowerCamelCase__ = """Alexander Joslin"""
import operator as op
from .stack import Stack
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> int:
lowerCAmelCase__ : Union[str, Any] = {'*': op.mul, '/': op.truediv, '+': op.add, '-': op.sub}
lowerCAmelCase__ : Stack[int] = Stack()
lowerCAmelCase__ : Stack[str] = Stack()
for i in equation:
if i.isdigit():
# RULE 1
operand_stack.push(int(SCREAMING_SNAKE_CASE_ ) )
elif i in operators:
# RULE 2
operator_stack.push(SCREAMING_SNAKE_CASE_ )
elif i == ")":
# RULE 4
lowerCAmelCase__ : List[Any] = operator_stack.peek()
operator_stack.pop()
lowerCAmelCase__ : List[str] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : List[Any] = operand_stack.peek()
operand_stack.pop()
lowerCAmelCase__ : Tuple = operators[opr](SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
operand_stack.push(SCREAMING_SNAKE_CASE_ )
# RULE 5
return operand_stack.peek()
if __name__ == "__main__":
lowerCamelCase__ = """(5 + ((4 * 2) * (2 + 3)))"""
# answer = 45
print(F"""{equation} = {dijkstras_two_stack_algorithm(equation)}""") | 69 | 1 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 3 ) -> qiskit.result.counts.Counts:
if isinstance(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ):
raise TypeError('number of qubits must be a integer.' )
if number_of_qubits <= 0:
raise ValueError('number of qubits must be > 0.' )
if math.floor(SCREAMING_SNAKE_CASE_ ) != number_of_qubits:
raise ValueError('number of qubits must be exact integer.' )
if number_of_qubits > 10:
raise ValueError('number of qubits too large to simulate(>10).' )
lowerCAmelCase__ : Any = QuantumRegister(SCREAMING_SNAKE_CASE_ , 'qr' )
lowerCAmelCase__ : List[Any] = ClassicalRegister(SCREAMING_SNAKE_CASE_ , 'cr' )
lowerCAmelCase__ : Optional[int] = QuantumCircuit(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
lowerCAmelCase__ : Optional[Any] = number_of_qubits
for i in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(SCREAMING_SNAKE_CASE_ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(SCREAMING_SNAKE_CASE_ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# simulate with 10000 shots
lowerCAmelCase__ : str = Aer.get_backend('qasm_simulator' )
lowerCAmelCase__ : List[Any] = execute(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , shots=10_000 )
return job.result().get_counts(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
print(
F"""Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}"""
) | 69 |
import numpy
class A__ :
def __init__( self : Tuple , a : numpy.ndarray , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : int = input_array
# Random initial weights are assigned where first argument is the
# number of nodes in previous layer and second argument is the
# number of nodes in the next layer.
# Random initial weights are assigned.
# self.input_array.shape[1] is used to represent number of nodes in input layer.
# First hidden layer consists of 4 nodes.
lowerCAmelCase__ : Dict = numpy.random.rand(
self.input_array.shape[1] , 4 )
# Random initial values for the first hidden layer.
# First hidden layer has 4 nodes.
# Second hidden layer has 3 nodes.
lowerCAmelCase__ : List[str] = numpy.random.rand(
4 , 3 )
# Random initial values for the second hidden layer.
# Second hidden layer has 3 nodes.
# Output layer has 1 node.
lowerCAmelCase__ : List[Any] = numpy.random.rand(3 , 1 )
# Real output values provided.
lowerCAmelCase__ : str = output_array
# Predicted output values by the neural network.
# Predicted_output array initially consists of zeroes.
lowerCAmelCase__ : List[Any] = numpy.zeros(output_array.shape )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : str = sigmoid(
numpy.dot(self.input_array , self.input_layer_and_first_hidden_layer_weights ) )
# layer_between_first_hidden_layer_and_second_hidden_layer is the layer
# connecting the first hidden set of nodes with the second hidden set of nodes.
lowerCAmelCase__ : Tuple = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
# layer_between_second_hidden_layer_and_output is the layer connecting
# second hidden layer with the output node.
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return self.layer_between_second_hidden_layer_and_output
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : List[str] = numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer.T , 2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , )
lowerCAmelCase__ : Optional[Any] = numpy.dot(
self.layer_between_input_and_first_hidden_layer.T , numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , )
lowerCAmelCase__ : int = numpy.dot(
self.input_array.T , numpy.dot(
numpy.dot(
2
* (self.output_array - self.predicted_output)
* sigmoid_derivative(self.predicted_output ) , self.second_hidden_layer_and_output_layer_weights.T , )
* sigmoid_derivative(
self.layer_between_first_hidden_layer_and_second_hidden_layer ) , self.first_hidden_layer_and_second_hidden_layer_weights.T , )
* sigmoid_derivative(self.layer_between_input_and_first_hidden_layer ) , )
self.input_layer_and_first_hidden_layer_weights += (
updated_input_layer_and_first_hidden_layer_weights
)
self.first_hidden_layer_and_second_hidden_layer_weights += (
updated_first_hidden_layer_and_second_hidden_layer_weights
)
self.second_hidden_layer_and_output_layer_weights += (
updated_second_hidden_layer_and_output_layer_weights
)
def _lowerCamelCase ( self : Optional[int] , a : numpy.ndarray , a : int , a : bool ):
'''simple docstring'''
for iteration in range(1 , iterations + 1 ):
lowerCAmelCase__ : Any = self.feedforward()
self.back_propagation()
if give_loss:
lowerCAmelCase__ : Tuple = numpy.mean(numpy.square(output - self.feedforward() ) )
print(f'''Iteration {iteration} Loss: {loss}''' )
def _lowerCamelCase ( self : Optional[Any] , a : numpy.ndarray ):
'''simple docstring'''
lowerCAmelCase__ : Dict = input_arr
lowerCAmelCase__ : Any = sigmoid(
numpy.dot(self.array , self.input_layer_and_first_hidden_layer_weights ) )
lowerCAmelCase__ : int = sigmoid(
numpy.dot(
self.layer_between_input_and_first_hidden_layer , self.first_hidden_layer_and_second_hidden_layer_weights , ) )
lowerCAmelCase__ : List[Any] = sigmoid(
numpy.dot(
self.layer_between_first_hidden_layer_and_second_hidden_layer , self.second_hidden_layer_and_output_layer_weights , ) )
return int(self.layer_between_second_hidden_layer_and_output > 0.6 )
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return 1 / (1 + numpy.exp(-value ))
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> numpy.ndarray:
return (value) * (1 - (value))
def lowerCAmelCase__ ( ) -> int:
lowerCAmelCase__ : Any = numpy.array(
(
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0],
[1, 0, 1],
[1, 1, 0],
[1, 1, 1],
) , dtype=numpy.floataa , )
# True output values for the given input values.
lowerCAmelCase__ : int = numpy.array(([0], [1], [1], [0], [1], [0], [0], [1]) , dtype=numpy.floataa )
# Calling neural network class.
lowerCAmelCase__ : List[str] = TwoHiddenLayerNeuralNetwork(
input_array=SCREAMING_SNAKE_CASE_ , output_array=SCREAMING_SNAKE_CASE_ )
# Calling training function.
# Set give_loss to True if you want to see loss in every iteration.
neural_network.train(output=SCREAMING_SNAKE_CASE_ , iterations=10 , give_loss=SCREAMING_SNAKE_CASE_ )
return neural_network.predict(numpy.array(([1, 1, 1]) , dtype=numpy.floataa ) )
if __name__ == "__main__":
example() | 69 | 1 |
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
return number & 1 == 0
if __name__ == "__main__":
import doctest
doctest.testmod() | 69 |
import collections
import inspect
import unittest
from transformers import SwinvaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwinvaForImageClassification, SwinvaForMaskedImageModeling, SwinvaModel
from transformers.models.swinva.modeling_swinva import SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class A__ :
def __init__( self : int , a : str , a : Union[str, Any]=13 , a : int=32 , a : Optional[Any]=2 , a : Tuple=3 , a : List[Any]=16 , a : List[str]=[1, 2, 1] , a : int=[2, 2, 4] , a : int=2 , a : Optional[Any]=2.0 , a : Optional[int]=True , a : Dict=0.0 , a : Any=0.0 , a : int=0.1 , a : List[str]="gelu" , a : Optional[Any]=False , a : str=True , a : Dict=0.0_2 , a : Any=1E-5 , a : Optional[int]=True , a : str=None , a : str=True , a : int=10 , a : str=8 , ):
'''simple docstring'''
lowerCAmelCase__ : str = parent
lowerCAmelCase__ : Union[str, Any] = batch_size
lowerCAmelCase__ : List[str] = image_size
lowerCAmelCase__ : Optional[Any] = patch_size
lowerCAmelCase__ : Tuple = num_channels
lowerCAmelCase__ : Optional[int] = embed_dim
lowerCAmelCase__ : Tuple = depths
lowerCAmelCase__ : List[str] = num_heads
lowerCAmelCase__ : List[Any] = window_size
lowerCAmelCase__ : Any = mlp_ratio
lowerCAmelCase__ : Optional[Any] = qkv_bias
lowerCAmelCase__ : Any = hidden_dropout_prob
lowerCAmelCase__ : Optional[int] = attention_probs_dropout_prob
lowerCAmelCase__ : int = drop_path_rate
lowerCAmelCase__ : Optional[Any] = hidden_act
lowerCAmelCase__ : int = use_absolute_embeddings
lowerCAmelCase__ : List[str] = patch_norm
lowerCAmelCase__ : Optional[int] = layer_norm_eps
lowerCAmelCase__ : List[str] = initializer_range
lowerCAmelCase__ : Optional[Any] = is_training
lowerCAmelCase__ : List[Any] = scope
lowerCAmelCase__ : Dict = use_labels
lowerCAmelCase__ : List[Any] = type_sequence_label_size
lowerCAmelCase__ : Optional[Any] = encoder_stride
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
lowerCAmelCase__ : Optional[Any] = None
if self.use_labels:
lowerCAmelCase__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
lowerCAmelCase__ : int = self.get_config()
return config, pixel_values, labels
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
return SwinvaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def _lowerCamelCase ( self : List[str] , a : Any , a : str , a : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = SwinvaModel(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = model(a )
lowerCAmelCase__ : str = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
lowerCAmelCase__ : Tuple = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _lowerCamelCase ( self : Union[str, Any] , a : Optional[Any] , a : Tuple , a : int ):
'''simple docstring'''
lowerCAmelCase__ : Any = SwinvaForMaskedImageModeling(config=a )
model.to(a )
model.eval()
lowerCAmelCase__ : str = model(a )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
lowerCAmelCase__ : Any = 1
lowerCAmelCase__ : Dict = SwinvaForMaskedImageModeling(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Optional[int] = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
lowerCAmelCase__ : List[str] = model(a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _lowerCamelCase ( self : Union[str, Any] , a : int , a : str , a : Any ):
'''simple docstring'''
lowerCAmelCase__ : str = self.type_sequence_label_size
lowerCAmelCase__ : List[Any] = SwinvaForImageClassification(a )
model.to(a )
model.eval()
lowerCAmelCase__ : Union[str, Any] = model(a , labels=a )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowerCamelCase ( self : int ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = self.prepare_config_and_inputs()
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = config_and_inputs
lowerCAmelCase__ : str = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class A__ ( __magic_name__ , __magic_name__ , unittest.TestCase ):
lowercase = (
(SwinvaModel, SwinvaForImageClassification, SwinvaForMaskedImageModeling) if is_torch_available() else ()
)
lowercase = (
{'feature-extraction': SwinvaModel, 'image-classification': SwinvaForImageClassification}
if is_torch_available()
else {}
)
lowercase = False
lowercase = False
lowercase = False
lowercase = False
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = SwinvaModelTester(self )
lowerCAmelCase__ : int = ConfigTester(self , config_class=a , embed_dim=37 )
def _lowerCamelCase ( self : List[Any] ):
'''simple docstring'''
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*a )
@unittest.skip(reason='Got `CUDA error: misaligned address` with PyTorch 2.0.0.' )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
pass
@unittest.skip(reason='Swinv2 does not use inputs_embeds' )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
pass
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(a )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
lowerCAmelCase__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(a , nn.Linear ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
lowerCAmelCase__ : Optional[int] = model_class(a )
lowerCAmelCase__ : Optional[int] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
lowerCAmelCase__ : Tuple = [*signature.parameters.keys()]
lowerCAmelCase__ : Dict = ['pixel_values']
self.assertListEqual(arg_names[:1] , a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = True
for model_class in self.all_model_classes:
lowerCAmelCase__ : Tuple = True
lowerCAmelCase__ : str = False
lowerCAmelCase__ : List[Any] = True
lowerCAmelCase__ : Dict = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : int = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Dict = outputs.attentions
lowerCAmelCase__ : Dict = len(self.model_tester.depths )
self.assertEqual(len(a ) , a )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
lowerCAmelCase__ : List[str] = True
lowerCAmelCase__ : Optional[int] = config.window_size**2
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
lowerCAmelCase__ : Tuple = len(a )
# Check attention is always last and order is fine
lowerCAmelCase__ : str = True
lowerCAmelCase__ : Union[str, Any] = True
lowerCAmelCase__ : str = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : List[Any] = model(**self._prepare_for_class(a , a ) )
if hasattr(self.model_tester , 'num_hidden_states_types' ):
lowerCAmelCase__ : Optional[Any] = self.model_tester.num_hidden_states_types
else:
# also another +1 for reshaped_hidden_states
lowerCAmelCase__ : Any = 2
self.assertEqual(out_len + added_hidden_states , len(a ) )
lowerCAmelCase__ : Dict = outputs.attentions
self.assertEqual(len(a ) , a )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_heads[0], window_size_squared, window_size_squared] , )
def _lowerCamelCase ( self : int , a : Optional[int] , a : int , a : Optional[Any] , a : List[Any] ):
'''simple docstring'''
lowerCAmelCase__ : int = model_class(a )
model.to(a )
model.eval()
with torch.no_grad():
lowerCAmelCase__ : Optional[Any] = model(**self._prepare_for_class(a , a ) )
lowerCAmelCase__ : Optional[Any] = outputs.hidden_states
lowerCAmelCase__ : str = getattr(
self.model_tester , 'expected_num_hidden_layers' , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(a ) , a )
# Swinv2 has a different seq_length
lowerCAmelCase__ : int = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
lowerCAmelCase__ : Union[str, Any] = outputs.reshaped_hidden_states
self.assertEqual(len(a ) , a )
lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ : Any = reshaped_hidden_states[0].shape
lowerCAmelCase__ : List[str] = (
reshaped_hidden_states[0].view(a , a , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : str = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , a )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : List[str] = True
self.check_hidden_states_output(a , a , a , a )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Any = 3
lowerCAmelCase__ : int = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
lowerCAmelCase__ : str = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
lowerCAmelCase__ : str = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
lowerCAmelCase__ : Any = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
lowerCAmelCase__ : str = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
lowerCAmelCase__ : Any = True
self.check_hidden_states_output(a , a , a , (padded_height, padded_width) )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*a )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
lowerCAmelCase__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*a )
@slow
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
for model_name in SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
lowerCAmelCase__ : List[str] = SwinvaModel.from_pretrained(a )
self.assertIsNotNone(a )
def _lowerCamelCase ( self : Any ):
'''simple docstring'''
lowerCAmelCase__ , lowerCAmelCase__ : Optional[int] = self.model_tester.prepare_config_and_inputs_for_common()
lowerCAmelCase__ : Optional[int] = _config_zero_init(a )
for model_class in self.all_model_classes:
lowerCAmelCase__ : int = model_class(config=a )
for name, param in model.named_parameters():
if "embeddings" not in name and "logit_scale" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1E9).round() / 1E9).item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@require_vision
@require_torch
class A__ ( unittest.TestCase ):
@cached_property
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
return (
AutoImageProcessor.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' )
if is_vision_available()
else None
)
@slow
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Tuple = SwinvaForImageClassification.from_pretrained('microsoft/swinv2-tiny-patch4-window8-256' ).to(
a )
lowerCAmelCase__ : Dict = self.default_image_processor
lowerCAmelCase__ : List[str] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
lowerCAmelCase__ : Any = image_processor(images=a , return_tensors='pt' ).to(a )
# forward pass
with torch.no_grad():
lowerCAmelCase__ : Union[str, Any] = model(**a )
# verify the logits
lowerCAmelCase__ : List[str] = torch.Size((1, 1_000) )
self.assertEqual(outputs.logits.shape , a )
lowerCAmelCase__ : Optional[Any] = torch.tensor([-0.3_9_4_7, -0.4_3_0_6, 0.0_0_2_6] ).to(a )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , a , atol=1E-4 ) ) | 69 | 1 |
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) -> Optional[Any]:
# Initialise PyTorch model
lowerCAmelCase__ : int = TaConfig.from_json_file(SCREAMING_SNAKE_CASE_ )
print(F'''Building PyTorch model from configuration: {config}''' )
lowerCAmelCase__ : Tuple = TaForConditionalGeneration(SCREAMING_SNAKE_CASE_ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(SCREAMING_SNAKE_CASE_ )
if __name__ == "__main__":
lowerCamelCase__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
lowerCamelCase__ = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 69 |
from itertools import permutations
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ ) -> bool:
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
lowerCAmelCase__ : str = [7, 11, 13, 17]
for i, test in enumerate(SCREAMING_SNAKE_CASE_ ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def lowerCAmelCase__ ( SCREAMING_SNAKE_CASE_ = 10 ) -> int:
return sum(
int(''.join(map(SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ ) ) )
for num in permutations(range(SCREAMING_SNAKE_CASE_ ) )
if is_substring_divisible(SCREAMING_SNAKE_CASE_ ) )
if __name__ == "__main__":
print(F"""{solution() = }""") | 69 | 1 |
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
lowerCamelCase__ = logging.get_logger(__name__)
class A__ ( __magic_name__ ):
lowercase = ['input_features', 'is_longer']
def __init__( self : Any , a : Union[str, Any]=64 , a : int=48_000 , a : int=480 , a : Any=10 , a : Union[str, Any]=1_024 , a : List[Any]=0.0 , a : Union[str, Any]=False , a : float = 0 , a : float = 14_000 , a : int = None , a : str = "fusion" , a : str = "repeatpad" , **a : Any , ):
'''simple docstring'''
super().__init__(
feature_size=a , sampling_rate=a , padding_value=a , return_attention_mask=a , **a , )
lowerCAmelCase__ : List[str] = top_db
lowerCAmelCase__ : Dict = truncation
lowerCAmelCase__ : Dict = padding
lowerCAmelCase__ : List[str] = fft_window_size
lowerCAmelCase__ : Any = (fft_window_size >> 1) + 1
lowerCAmelCase__ : Optional[Any] = hop_length
lowerCAmelCase__ : Optional[int] = max_length_s
lowerCAmelCase__ : Any = max_length_s * sampling_rate
lowerCAmelCase__ : Optional[Any] = sampling_rate
lowerCAmelCase__ : Any = frequency_min
lowerCAmelCase__ : Dict = frequency_max
lowerCAmelCase__ : Optional[Any] = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm=a , mel_scale='htk' , )
lowerCAmelCase__ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins , num_mel_filters=a , min_frequency=a , max_frequency=a , sampling_rate=a , norm='slaney' , mel_scale='slaney' , )
def _lowerCamelCase ( self : Dict ):
'''simple docstring'''
lowerCAmelCase__ : Any = copy.deepcopy(self.__dict__ )
lowerCAmelCase__ : Any = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _lowerCamelCase ( self : Any , a : np.array , a : Optional[np.array] = None ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = spectrogram(
a , window_function(self.fft_window_size , 'hann' ) , frame_length=self.fft_window_size , hop_length=self.hop_length , power=2.0 , mel_filters=a , log_mel='dB' , )
return log_mel_spectrogram.T
def _lowerCamelCase ( self : Any , a : Dict , a : Dict , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = np.array_split(list(range(0 , total_frames - chunk_frames + 1 ) ) , 3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
lowerCAmelCase__ : Optional[int] = [0]
# randomly choose index for each part
lowerCAmelCase__ : Dict = np.random.choice(ranges[0] )
lowerCAmelCase__ : Tuple = np.random.choice(ranges[1] )
lowerCAmelCase__ : Optional[int] = np.random.choice(ranges[2] )
lowerCAmelCase__ : Optional[int] = mel[idx_front : idx_front + chunk_frames, :]
lowerCAmelCase__ : Optional[int] = mel[idx_middle : idx_middle + chunk_frames, :]
lowerCAmelCase__ : Optional[int] = mel[idx_back : idx_back + chunk_frames, :]
lowerCAmelCase__ : List[Any] = torch.tensor(mel[None, None, :] )
lowerCAmelCase__ : List[str] = torch.nn.functional.interpolate(
a , size=[chunk_frames, 64] , mode='bilinear' , align_corners=a )
lowerCAmelCase__ : List[str] = mel_shrink[0][0].numpy()
lowerCAmelCase__ : Any = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] , axis=0 )
return mel_fusion
def _lowerCamelCase ( self : List[Any] , a : np.array , a : List[str] , a : int , a : Optional[int] ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
lowerCAmelCase__ : Optional[Any] = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
lowerCAmelCase__ : Optional[int] = len(a ) - max_length
lowerCAmelCase__ : int = np.random.randint(0 , overflow + 1 )
lowerCAmelCase__ : Union[str, Any] = waveform[idx : idx + max_length]
lowerCAmelCase__ : Union[str, Any] = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
lowerCAmelCase__ : Dict = self._np_extract_fbank_features(a , self.mel_filters )
lowerCAmelCase__ : List[str] = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
lowerCAmelCase__ : Union[str, Any] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
lowerCAmelCase__ : List[Any] = np.stack([mel, mel, mel, mel] , axis=0 )
lowerCAmelCase__ : Any = False
else:
lowerCAmelCase__ : Union[str, Any] = self._random_mel_fusion(a , a , a )
lowerCAmelCase__ : Tuple = True
else:
raise NotImplementedError(f'''data_truncating {truncation} not implemented''' )
else:
lowerCAmelCase__ : Dict = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
lowerCAmelCase__ : Optional[Any] = int(max_length / len(a ) )
lowerCAmelCase__ : Any = np.stack(np.tile(a , n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
lowerCAmelCase__ : str = int(max_length / len(a ) )
lowerCAmelCase__ : Tuple = np.stack(np.tile(a , a ) )
lowerCAmelCase__ : Optional[Any] = np.pad(a , (0, max_length - waveform.shape[0]) , mode='constant' , constant_values=0 )
if truncation == "fusion":
lowerCAmelCase__ : Optional[int] = self._np_extract_fbank_features(a , self.mel_filters )
lowerCAmelCase__ : Tuple = np.stack([input_mel, input_mel, input_mel, input_mel] , axis=0 )
else:
lowerCAmelCase__ : str = self._np_extract_fbank_features(a , self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : Union[str, Any] , a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , a : str = None , a : Optional[str] = None , a : Optional[int] = None , a : Optional[int] = None , a : Optional[Union[str, TensorType]] = None , **a : Optional[int] , ):
'''simple docstring'''
lowerCAmelCase__ : Any = truncation if truncation is not None else self.truncation
lowerCAmelCase__ : Any = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'''The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'''
f''' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'''
f''' was sampled with {self.sampling_rate} and not {sampling_rate}.''' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
lowerCAmelCase__ : str = isinstance(a , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'''Only mono-channel audio is supported for input to {self}''' )
lowerCAmelCase__ : Optional[Any] = is_batched_numpy or (
isinstance(a , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
lowerCAmelCase__ : List[Any] = [np.asarray(a , dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(a , np.ndarray ):
lowerCAmelCase__ : Union[str, Any] = np.asarray(a , dtype=np.floataa )
elif isinstance(a , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
lowerCAmelCase__ : Dict = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
lowerCAmelCase__ : Optional[int] = [np.asarray(a )]
# convert to mel spectrogram, truncate and pad if needed.
lowerCAmelCase__ : Optional[Any] = [
self._get_input_mel(a , max_length if max_length else self.nb_max_samples , a , a )
for waveform in raw_speech
]
lowerCAmelCase__ : Any = []
lowerCAmelCase__ : Any = []
for mel, longer in padded_inputs:
input_mel.append(a )
is_longer.append(a )
if truncation == "fusion" and sum(a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
lowerCAmelCase__ : str = np.random.randint(0 , len(a ) )
lowerCAmelCase__ : Any = True
if isinstance(input_mel[0] , a ):
lowerCAmelCase__ : List[str] = [np.asarray(a , dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
lowerCAmelCase__ : str = [[longer] for longer in is_longer]
lowerCAmelCase__ : str = {'input_features': input_mel, 'is_longer': is_longer}
lowerCAmelCase__ : Dict = BatchFeature(a )
if return_tensors is not None:
lowerCAmelCase__ : Tuple = input_features.convert_to_tensors(a )
return input_features | 69 |
import gc
import unittest
import numpy as np
import torch
from torch.backends.cuda import sdp_kernel
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
from diffusers.utils import randn_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_a, require_torch_gpu
from ..pipeline_params import UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS, UNCONDITIONAL_IMAGE_GENERATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
# Force deterministic CUDA/cuDNN kernels so the hard-coded golden image slices
# asserted in the tests below are reproducible across runs.
enable_full_determinism()
class A__ ( PipelineTesterMixin , unittest.TestCase ):
    """Fast tests for ``ConsistencyModelPipeline`` using tiny dummy UNets.

    Covers the multistep and onestep consistency-model samplers, each in the
    unconditional and class-conditional variants, checking output shape and a
    golden 3x3 slice of the generated image.
    """

    # Attribute names below are consumed by PipelineTesterMixin.
    pipeline_class = ConsistencyModelPipeline
    params = UNCONDITIONAL_IMAGE_GENERATION_PARAMS
    batch_params = UNCONDITIONAL_IMAGE_GENERATION_BATCH_PARAMS
    # Override required_optional_params to remove num_images_per_prompt
    required_optional_params = frozenset(
        [
            'num_inference_steps',
            'generator',
            'latents',
            'output_type',
            'return_dict',
            'callback',
            'callback_steps',
        ]
    )

    @property
    def dummy_uncond_unet(self):
        """Tiny unconditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet'
        )
        return unet

    @property
    def dummy_cond_unet(self):
        """Tiny class-conditional UNet checkpoint used by the fast tests."""
        unet = UNetaDModel.from_pretrained(
            'diffusers/consistency-models-test', subfolder='test_unet_class_cond'
        )
        return unet

    def get_dummy_components(self, class_cond=False):
        """Return the components dict for building a ConsistencyModelPipeline.

        Args:
            class_cond: when True, use the class-conditional dummy UNet.
        """
        if class_cond:
            unet = self.dummy_cond_unet
        else:
            unet = self.dummy_uncond_unet
        # Default to CM multistep sampler
        scheduler = CMStochasticIterativeScheduler(
            num_train_timesteps=40, sigma_min=0.002, sigma_max=80.0
        )
        components = {
            'unet': unet,
            'scheduler': scheduler,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        """Return deterministic call kwargs for the pipeline on *device*."""
        if str(device).startswith('mps'):
            # MPS generators cannot be created per-device; fall back to global seed.
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            'batch_size': 1,
            'num_inference_steps': None,
            'timesteps': [22, 0],
            'generator': generator,
            'output_type': 'np',
        }
        return inputs

    def test_consistency_model_pipeline_multistep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_multistep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.3572, 0.6273, 0.4031, 0.3961, 0.4321, 0.5730, 0.5266, 0.4780, 0.5004])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        # Onestep sampling: a single inference step, no explicit timestep schedule.
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3

    def test_consistency_model_pipeline_onestep_class_cond(self):
        device = 'cpu'  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components(class_cond=True)
        pipe = ConsistencyModelPipeline(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs(device)
        inputs['num_inference_steps'] = 1
        inputs['timesteps'] = None
        inputs['class_labels'] = 0
        image = pipe(**inputs).images
        assert image.shape == (1, 32, 32, 3)
        image_slice = image[0, -3:, -3:, -1]
        expected_slice = np.array([0.5004, 0.5004, 0.4994, 0.5008, 0.4976, 0.5018, 0.4990, 0.4982, 0.4987])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-3
@slow
@require_torch_gpu
class A__ ( unittest.TestCase ):
    def tearDown(self):
        """Release Python and CUDA memory between slow GPU tests."""
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
def _lowerCamelCase ( self : Optional[Any] , a : Tuple=0 , a : Optional[Any]=False , a : Optional[Any]="cpu" , a : Union[str, Any]=torch.floataa , a : Dict=(1, 3, 64, 64) ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = torch.manual_seed(a )
lowerCAmelCase__ : List[Any] = {
'num_inference_steps': None,
'timesteps': [22, 0],
'class_labels': 0,
'generator': generator,
'output_type': 'np',
}
if get_fixed_latents:
lowerCAmelCase__ : Optional[int] = self.get_fixed_latents(seed=a , device=a , dtype=a , shape=a )
lowerCAmelCase__ : Tuple = latents
return inputs
def _lowerCamelCase ( self : str , a : Tuple=0 , a : Tuple="cpu" , a : Tuple=torch.floataa , a : str=(1, 3, 64, 64) ):
'''simple docstring'''
if type(a ) == str:
lowerCAmelCase__ : str = torch.device(a )
lowerCAmelCase__ : List[str] = torch.Generator(device=a ).manual_seed(a )
lowerCAmelCase__ : Any = randn_tensor(a , generator=a , device=a , dtype=a )
return latents
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[str] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : List[Any] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Optional[Any] = self.get_inputs()
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : List[str] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_8_8_8, 0.0_8_8_1, 0.0_6_6_6, 0.0_4_7_9, 0.0_2_9_2, 0.0_1_9_5, 0.0_2_0_1, 0.0_1_6_3, 0.0_2_5_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
def _lowerCamelCase ( self : str ):
'''simple docstring'''
lowerCAmelCase__ : Optional[int] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : Any = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Optional[int] = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : List[str] = self.get_inputs()
lowerCAmelCase__ : Union[str, Any] = 1
lowerCAmelCase__ : List[str] = None
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Optional[int] = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Union[str, Any] = np.array([0.0_3_4_0, 0.0_1_5_2, 0.0_0_6_3, 0.0_2_6_7, 0.0_2_2_1, 0.0_1_0_7, 0.0_4_1_6, 0.0_1_8_6, 0.0_2_1_7] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2E-2
@require_torch_a
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : int = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Tuple = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : str = self.get_inputs(get_fixed_latents=a , device=a )
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : Dict = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : str = image[0, -3:, -3:, -1]
lowerCAmelCase__ : str = np.array([0.1_8_7_5, 0.1_4_2_8, 0.1_2_8_9, 0.2_1_5_1, 0.2_0_9_2, 0.1_4_7_7, 0.1_8_7_7, 0.1_6_4_1, 0.1_3_5_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3
@require_torch_a
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = UNetaDModel.from_pretrained('diffusers/consistency_models' , subfolder='diffusers_cd_imagenet64_l2' )
lowerCAmelCase__ : List[Any] = CMStochasticIterativeScheduler(
num_train_timesteps=40 , sigma_min=0.0_0_2 , sigma_max=8_0.0 , )
lowerCAmelCase__ : Dict = ConsistencyModelPipeline(unet=a , scheduler=a )
pipe.to(torch_device=a , torch_dtype=torch.floataa )
pipe.set_progress_bar_config(disable=a )
lowerCAmelCase__ : Any = self.get_inputs(get_fixed_latents=a , device=a )
lowerCAmelCase__ : List[str] = 1
lowerCAmelCase__ : str = None
# Ensure usage of flash attention in torch 2.0
with sdp_kernel(enable_flash=a , enable_math=a , enable_mem_efficient=a ):
lowerCAmelCase__ : List[str] = pipe(**a ).images
assert image.shape == (1, 64, 64, 3)
lowerCAmelCase__ : Dict = image[0, -3:, -3:, -1]
lowerCAmelCase__ : Optional[int] = np.array([0.1_6_6_3, 0.1_9_4_8, 0.2_2_7_5, 0.1_6_8_0, 0.1_2_0_4, 0.1_2_4_5, 0.1_8_5_8, 0.1_3_3_8, 0.2_0_9_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-3 | 69 | 1 |
from typing import Dict
from .base import GenericTensor, Pipeline
class A__ ( Pipeline ):
    """Feature-extraction pipeline: tokenizes the input, runs the model, and
    returns the raw hidden states (as nested lists, or as framework tensors
    when ``return_tensors=True``).

    NOTE(review): the obfuscated source gave every hook method the same name
    and declared ``_sanitize_parameters`` with four parameters all called
    ``a`` (a SyntaxError).  The parameter and method names below are restored
    from the identifiers the bodies already reference (``truncation``,
    ``tokenize_kwargs``, ``return_tensors``, ``model_inputs``,
    ``model_outputs``) and from the Pipeline hook contract; the base class is
    restored from the file's ``from .base import GenericTensor, Pipeline``.
    """

    def _sanitize_parameters( self : Dict , truncation : Optional[int]=None , tokenize_kwargs : Optional[int]=None , return_tensors : Dict=None , **kwargs : Optional[int] ):
        """Split kwargs into (preprocess, forward, postprocess) parameter dicts."""
        if tokenize_kwargs is None:
            tokenize_kwargs = {}
        if truncation is not None:
            if "truncation" in tokenize_kwargs:
                raise ValueError(
                    'truncation parameter defined twice (given as keyword argument as well as in tokenize_kwargs)' )
            tokenize_kwargs["truncation"] = truncation
        preprocess_params = tokenize_kwargs

        postprocess_params = {}
        if return_tensors is not None:
            postprocess_params["return_tensors"] = return_tensors
        return preprocess_params, {}, postprocess_params

    def preprocess( self : Dict , inputs : List[Any] , **tokenize_kwargs : Tuple ) -> Dict[str, GenericTensor]:
        """Tokenize the raw input into framework tensors."""
        return_tensors = self.framework
        model_inputs = self.tokenizer(inputs , return_tensors=return_tensors , **tokenize_kwargs )
        return model_inputs

    def _forward( self : Optional[int] , model_inputs : str ):
        """Run the model on the tokenized inputs."""
        model_outputs = self.model(**model_inputs )
        return model_outputs

    def postprocess( self : int , model_outputs : Union[str, Any] , return_tensors : Union[str, Any]=False ):
        """Return the first model output, as a tensor or as nested lists."""
        if return_tensors:
            return model_outputs[0]
        if self.framework == "pt":
            return model_outputs[0].tolist()
        elif self.framework == "tf":
            return model_outputs[0].numpy().tolist()

    def __call__( self : int , *args : Optional[Any] , **kwargs : Any ):
        """Delegate to the generic Pipeline call machinery."""
        return super().__call__(*args , **kwargs )
import numpy as np
import torch
from torch.utils.data import Dataset
from utils import logger
class A__ ( __magic_name__ ):
def __init__( self : int , a : List[str] , a : List[str] ):
'''simple docstring'''
lowerCAmelCase__ : List[Any] = params
lowerCAmelCase__ : Union[str, Any] = np.array(a )
lowerCAmelCase__ : List[Any] = np.array([len(a ) for t in data] )
self.check()
self.remove_long_sequences()
self.remove_empty_sequences()
self.remove_unknown_sequences()
self.check()
self.print_statistics()
def __getitem__( self : str , a : List[str] ):
'''simple docstring'''
return (self.token_ids[index], self.lengths[index])
def __len__( self : Optional[int] ):
'''simple docstring'''
return len(self.lengths )
def _lowerCamelCase ( self : Tuple ):
'''simple docstring'''
assert len(self.token_ids ) == len(self.lengths )
assert all(self.lengths[i] == len(self.token_ids[i] ) for i in range(len(self.lengths ) ) )
def _lowerCamelCase ( self : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = self.params.max_model_input_size
lowerCAmelCase__ : Optional[int] = self.lengths > max_len
logger.info(f'''Splitting {sum(a )} too long sequences.''' )
def divide_chunks(a : List[str] , a : Tuple ):
return [l[i : i + n] for i in range(0 , len(a ) , a )]
lowerCAmelCase__ : Union[str, Any] = []
lowerCAmelCase__ : Union[str, Any] = []
if self.params.mlm:
lowerCAmelCase__ , lowerCAmelCase__ : Dict = self.params.special_tok_ids['cls_token'], self.params.special_tok_ids['sep_token']
else:
lowerCAmelCase__ , lowerCAmelCase__ : int = self.params.special_tok_ids['bos_token'], self.params.special_tok_ids['eos_token']
for seq_, len_ in zip(self.token_ids , self.lengths ):
assert (seq_[0] == cls_id) and (seq_[-1] == sep_id), seq_
if len_ <= max_len:
new_tok_ids.append(seq_ )
new_lengths.append(len_ )
else:
lowerCAmelCase__ : Optional[int] = []
for sub_s in divide_chunks(seq_ , max_len - 2 ):
if sub_s[0] != cls_id:
lowerCAmelCase__ : Dict = np.insert(a , 0 , a )
if sub_s[-1] != sep_id:
lowerCAmelCase__ : Dict = np.insert(a , len(a ) , a )
assert len(a ) <= max_len
assert (sub_s[0] == cls_id) and (sub_s[-1] == sep_id), sub_s
sub_seqs.append(a )
new_tok_ids.extend(a )
new_lengths.extend([len(a ) for l in sub_seqs] )
lowerCAmelCase__ : str = np.array(a )
lowerCAmelCase__ : Optional[Any] = np.array(a )
def _lowerCamelCase ( self : Optional[Any] ):
'''simple docstring'''
lowerCAmelCase__ : Union[str, Any] = len(self )
lowerCAmelCase__ : List[Any] = self.lengths > 11
lowerCAmelCase__ : Dict = self.token_ids[indices]
lowerCAmelCase__ : Tuple = self.lengths[indices]
lowerCAmelCase__ : Any = len(self )
logger.info(f'''Remove {init_size - new_size} too short (<=11 tokens) sequences.''' )
def _lowerCamelCase ( self : List[str] ):
'''simple docstring'''
if "unk_token" not in self.params.special_tok_ids:
return
else:
lowerCAmelCase__ : int = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : str = len(self )
lowerCAmelCase__ : List[str] = np.array([np.count_nonzero(a == unk_token_id ) for a in self.token_ids] )
lowerCAmelCase__ : int = (unk_occs / self.lengths) < 0.5
lowerCAmelCase__ : List[str] = self.token_ids[indices]
lowerCAmelCase__ : Optional[Any] = self.lengths[indices]
lowerCAmelCase__ : Union[str, Any] = len(self )
logger.info(f'''Remove {init_size - new_size} sequences with a high level of unknown tokens (50%).''' )
def _lowerCamelCase ( self : Union[str, Any] ):
'''simple docstring'''
if not self.params.is_master:
return
logger.info(f'''{len(self )} sequences''' )
# data_len = sum(self.lengths)
# nb_unique_tokens = len(Counter(list(chain(*self.token_ids))))
# logger.info(f'{data_len} tokens ({nb_unique_tokens} unique)')
# unk_idx = self.params.special_tok_ids['unk_token']
# nb_unknown = sum([(t==unk_idx).sum() for t in self.token_ids])
# logger.info(f'{nb_unknown} unknown tokens (covering {100*nb_unknown/data_len:.2f}% of the data)')
def _lowerCamelCase ( self : int , a : Optional[int] ):
'''simple docstring'''
lowerCAmelCase__ : Optional[Any] = [t[0] for t in batch]
lowerCAmelCase__ : List[str] = [t[1] for t in batch]
assert len(a ) == len(a )
# Max for paddings
lowerCAmelCase__ : List[str] = max(a )
# Pad token ids
if self.params.mlm:
lowerCAmelCase__ : str = self.params.special_tok_ids['pad_token']
else:
lowerCAmelCase__ : Optional[int] = self.params.special_tok_ids['unk_token']
lowerCAmelCase__ : Tuple = [list(t.astype(a ) ) + [pad_idx] * (max_seq_len_ - len(a )) for t in token_ids]
assert len(tk_ ) == len(a )
assert all(len(a ) == max_seq_len_ for t in tk_ )
lowerCAmelCase__ : Union[str, Any] = torch.tensor(tk_ ) # (bs, max_seq_len_)
lowerCAmelCase__ : List[str] = torch.tensor(a ) # (bs)
return tk_t, lg_t | 69 | 1 |
import os
import time
from dataclasses import dataclass, field
from enum import Enum
from typing import Dict, List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...models.auto.modeling_auto import MODEL_FOR_QUESTION_ANSWERING_MAPPING
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
from ..processors.squad import SquadFeatures, SquadVaProcessor, SquadVaProcessor, squad_convert_examples_to_features
# NOTE(review): the source assigned all three values to the same name
# (`lowerCamelCase__`), while later code reads `logger` (in the dataset class
# below), `MODEL_CONFIG_CLASSES` (the very next line) and, presumably,
# `MODEL_TYPES` (the help string of the first training-argument field) —
# names restored accordingly.
logger = logging.get_logger(__name__)

# Config classes that support question answering, and their model-type strings.
MODEL_CONFIG_CLASSES = list(MODEL_FOR_QUESTION_ANSWERING_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SquadDataTrainingArguments:
    """Arguments controlling SQuAD data loading and feature conversion.

    NOTE(review): in the obfuscated source every field was named ``lowercase``
    (so only the last one survived) and the ``None``/``False`` defaults were
    the undefined ``__magic_name__``.  Field names are restored from the
    ``args.<name>`` accesses in the dataset class below; the class name is
    restored from the ``args : SquadDataTrainingArguments`` annotation there.
    ``MODEL_TYPES`` is presumably the module-level tuple of supported model
    types — confirm against the definitions above.
    """

    model_type: str = field(
        default=None , metadata={'help': 'Model type selected in the list: ' + ', '.join(MODEL_TYPES )} )
    data_dir: str = field(
        default=None , metadata={'help': 'The input data dir. Should contain the .json files for the SQuAD task.'} )
    max_seq_length: int = field(
        default=128 , metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        } , )
    doc_stride: int = field(
        default=128 , metadata={'help': 'When splitting up a long document into chunks, how much stride to take between chunks.'} , )
    max_query_length: int = field(
        default=64 , metadata={
            'help': (
                'The maximum number of tokens for the question. Questions longer than this will '
                'be truncated to this length.'
            )
        } , )
    max_answer_length: int = field(
        default=30 , metadata={
            'help': (
                'The maximum length of an answer that can be generated. This is needed because the start '
                'and end predictions are not conditioned on one another.'
            )
        } , )
    overwrite_cache: bool = field(
        default=False , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
    version_2_with_negative: bool = field(
        default=False , metadata={'help': 'If true, the SQuAD examples contain some that do not have an answer.'} )
    null_score_diff_threshold: float = field(
        default=0.0 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    n_best_size: int = field(
        default=20 , metadata={'help': 'If null_score - best_non_null is greater than the threshold predict null.'} )
    lang_id: int = field(
        default=0 , metadata={
            'help': (
                'language id of input for language-specific xlm models (see'
                ' tokenization_xlm.PRETRAINED_INIT_CONFIGURATION)'
            )
        } , )
    threads: int = field(default=1 , metadata={'help': 'multiple threads for converting example to features'} )
class Split(Enum):
    """Dataset split selector.

    NOTE(review): both members were named ``lowercase`` in the obfuscated
    source; the member and class names are restored from the uses below
    (``Split[mode]``, ``Split.train``, ``Split.dev``) and the base class from
    the file's ``from enum import Enum`` import.
    """

    train = 'train'
    dev = 'dev'
class A__ ( Dataset ):
    """PyTorch dataset of SQuAD features with on-disk caching of the converted
    examples (presumably ``SquadDataset`` upstream; original class name kept).

    NOTE(review): the obfuscated ``__init__`` declared seven parameters all
    named ``a`` (a SyntaxError); parameter names are restored from the
    attribute accesses and keyword arguments in the body.  The base class is
    restored from the file's ``from torch.utils.data import Dataset`` import.
    """

    # Class-level annotations (the source had four placeholder `lowercase = 42`
    # lines here — reconstructed, confirm against upstream).
    args: SquadDataTrainingArguments
    features: List[SquadFeatures]
    mode: Split
    is_language_sensitive: bool

    def __init__( self : Tuple , args : SquadDataTrainingArguments , tokenizer : PreTrainedTokenizer , limit_length : Optional[int] = None , mode : Union[str, Split] = Split.train , is_language_sensitive : Optional[bool] = False , cache_dir : Optional[str] = None , dataset_format : Optional[str] = "pt" , ):
        """Load features from the cache when present, otherwise convert the raw
        SQuAD examples and write the cache (guarded by a file lock)."""
        self.args = args
        self.is_language_sensitive = is_language_sensitive
        # NOTE(review): the source instantiated the same processor in both
        # branches; upstream this is SquadV2Processor vs SquadV1Processor —
        # the distinction was lost in name mangling, restore with the import.
        self.processor = SquadVaProcessor() if args.version_2_with_negative else SquadVaProcessor()
        if isinstance(mode , str ):
            try:
                mode = Split[mode]
            except KeyError:
                raise KeyError('mode is not a valid split name' )
        self.mode = mode
        # Load data features from cache or dataset file
        version_tag = 'v2' if args.version_2_with_negative else 'v1'
        cached_features_file = os.path.join(
            cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{version_tag}''' , )

        # Make sure only the first process in distributed training processes the dataset,
        # and the others will use the cache.
        lock_path = cached_features_file + '.lock'
        with FileLock(lock_path ):
            if os.path.exists(cached_features_file ) and not args.overwrite_cache:
                start = time.time()
                self.old_features = torch.load(cached_features_file )
                # Legacy cache files have only features, while new cache files
                # will have dataset and examples also.
                self.features = self.old_features['features']
                self.dataset = self.old_features.get('dataset' , None )
                self.examples = self.old_features.get('examples' , None )
                logger.info(
                    f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )

                if self.dataset is None or self.examples is None:
                    logger.warning(
                        f'''Deleting cached file {cached_features_file} will allow dataset and examples to be cached in'''
                        ' future run' )
            else:
                if mode == Split.dev:
                    self.examples = self.processor.get_dev_examples(args.data_dir )
                else:
                    self.examples = self.processor.get_train_examples(args.data_dir )

                self.features , self.dataset = squad_convert_examples_to_features(
                    examples=self.examples , tokenizer=tokenizer , max_seq_length=args.max_seq_length , doc_stride=args.doc_stride , max_query_length=args.max_query_length , is_training=mode == Split.train , threads=args.threads , return_dataset=dataset_format , )

                start = time.time()
                torch.save(
                    {'features': self.features, 'dataset': self.dataset, 'examples': self.examples} , cached_features_file , )
                # ^ This seems to take a lot of time so I want to investigate why and how we can improve.
                logger.info(
                    f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )

    def __len__( self : Union[str, Any] ):
        return len(self.features )

    def __getitem__( self : str , i : int ):
        """Convert the i-th feature to a dict of tensors, keyed per model type."""
        feature = self.features[i]

        input_ids = torch.tensor(feature.input_ids , dtype=torch.long )
        attention_mask = torch.tensor(feature.attention_mask , dtype=torch.long )
        token_type_ids = torch.tensor(feature.token_type_ids , dtype=torch.long )
        cls_index = torch.tensor(feature.cls_index , dtype=torch.long )
        p_mask = torch.tensor(feature.p_mask , dtype=torch.float )
        is_impossible = torch.tensor(feature.is_impossible , dtype=torch.float )

        inputs = {
            'input_ids': input_ids,
            'attention_mask': attention_mask,
            'token_type_ids': token_type_ids,
        }

        # These model families do not use token_type_ids.
        if self.args.model_type in ["xlm", "roberta", "distilbert", "camembert"]:
            del inputs["token_type_ids"]

        if self.args.model_type in ["xlnet", "xlm"]:
            inputs.update({'cls_index': cls_index, 'p_mask': p_mask} )
            if self.args.version_2_with_negative:
                inputs.update({'is_impossible': is_impossible} )
            if self.is_language_sensitive:
                # Source had the mangled `torch.intaa`; int64 assumed — confirm.
                inputs.update({'langs': (torch.ones(input_ids.shape , dtype=torch.int64 ) * self.args.lang_id)} )

        if self.mode == Split.train:
            start_positions = torch.tensor(feature.start_position , dtype=torch.long )
            end_positions = torch.tensor(feature.end_position , dtype=torch.long )
            inputs.update({'start_positions': start_positions, 'end_positions': end_positions} )

        return inputs
from . import __version__
# Backward compatibility imports, to make sure all those objects can be found in file_utils
from .utils import (
CLOUDFRONT_DISTRIB_PREFIX,
CONFIG_NAME,
DISABLE_TELEMETRY,
DUMMY_INPUTS,
DUMMY_MASK,
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
FEATURE_EXTRACTOR_NAME,
FLAX_WEIGHTS_NAME,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
MODEL_CARD_NAME,
MULTIPLE_CHOICE_DUMMY_INPUTS,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
SENTENCEPIECE_UNDERLINE,
SPIECE_UNDERLINE,
TF2_WEIGHTS_NAME,
TF_WEIGHTS_NAME,
TORCH_FX_REQUIRED_VERSION,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
USE_JAX,
USE_TF,
USE_TORCH,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
ContextManagers,
DummyObject,
EntryNotFoundError,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
TensorType,
_LazyModule,
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
cached_property,
copy_func,
default_cache_path,
define_sagemaker_information,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
get_torch_version,
has_file,
http_user_agent,
is_apex_available,
is_bsa_available,
is_coloredlogs_available,
is_datasets_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_librosa_available,
is_offline_mode,
is_onnx_available,
is_pandas_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_tensor,
is_tensorflow_probability_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_training_run_on_sagemaker,
is_vision_available,
replace_return_docstrings,
requires_backends,
to_numpy,
to_py_obj,
torch_only_method,
) | 69 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.